2024-12-08 07:55:53,019 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-08 07:55:53,030 main DEBUG Took 0.008936 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-08 07:55:53,030 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-08 07:55:53,031 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-08 07:55:53,031 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-08 07:55:53,032 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,041 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-08 07:55:53,052 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,053 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,054 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,055 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,056 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,056 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,057 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,058 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,058 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,059 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,060 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,060 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,061 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,061 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,062 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,062 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,063 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,063 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,064 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,064 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,065 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,065 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,066 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,066 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 07:55:53,067 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,067 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-08 07:55:53,069 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 07:55:53,070 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-08 07:55:53,072 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-08 07:55:53,073 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-08 07:55:53,075 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-08 07:55:53,075 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-08 07:55:53,087 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-08 07:55:53,091 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-08 07:55:53,093 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-08 07:55:53,093 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-08 07:55:53,094 main DEBUG createAppenders(={Console})
2024-12-08 07:55:53,095 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-08 07:55:53,096 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-08 07:55:53,096 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-08 07:55:53,097 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-08 07:55:53,097 main DEBUG OutputStream closed
2024-12-08 07:55:53,098 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-08 07:55:53,098 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-08 07:55:53,098 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-08 07:55:53,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-08 07:55:53,173 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-08 07:55:53,174 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-08 07:55:53,175 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-08 07:55:53,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-08 07:55:53,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-08 07:55:53,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-08 07:55:53,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-08 07:55:53,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-08 07:55:53,177 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-08 07:55:53,177 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-08 07:55:53,177 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-08 07:55:53,177 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-08 07:55:53,178 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-08 07:55:53,178 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-08 07:55:53,178 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-08 07:55:53,178 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-08 07:55:53,179 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-08 07:55:53,181 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-08 07:55:53,181 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-08 07:55:53,182 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-08 07:55:53,182 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-08T07:55:53,391 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931
2024-12-08 07:55:53,394 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-08 07:55:53,394 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-08T07:55:53,402 INFO  [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-08T07:55:53,439 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=185, ProcessCount=11, AvailableMemoryMB=9570
2024-12-08T07:55:53,443 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-08T07:55:53,463 INFO  [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a, deleteOnExit=true
2024-12-08T07:55:53,464 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-08T07:55:53,465 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/test.cache.data in system properties and HBase conf
2024-12-08T07:55:53,466 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.tmp.dir in system properties and HBase conf
2024-12-08T07:55:53,467 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir in system properties and HBase conf
2024-12-08T07:55:53,468 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-08T07:55:53,469 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-08T07:55:53,469 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-08T07:55:53,587 WARN  [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-08T07:55:53,670 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-08T07:55:53,673 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-08T07:55:53,673 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-08T07:55:53,673 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-08T07:55:53,674 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-08T07:55:53,674 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-08T07:55:53,674 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-08T07:55:53,675 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-08T07:55:53,675 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-08T07:55:53,676 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-08T07:55:53,676 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/nfs.dump.dir in system properties and HBase conf
2024-12-08T07:55:53,676 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/java.io.tmpdir in system properties and HBase conf
2024-12-08T07:55:53,677 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-08T07:55:53,677 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-08T07:55:53,677 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-08T07:55:54,067 WARN  [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-08T07:55:54,580 WARN  [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-08T07:55:54,646 INFO  [Time-limited test {}] log.Log(170): Logging initialized @2213ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-08T07:55:54,709 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T07:55:54,764 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T07:55:54,782 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T07:55:54,782 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T07:55:54,783 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-08T07:55:54,795 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T07:55:54,797 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir/,AVAILABLE}
2024-12-08T07:55:54,798 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T07:55:54,958 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/java.io.tmpdir/jetty-localhost-37969-hadoop-hdfs-3_4_1-tests_jar-_-any-1187438301057199457/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-08T07:55:54,964 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37969}
2024-12-08T07:55:54,964 INFO  [Time-limited test {}] server.Server(415): Started @2532ms
2024-12-08T07:55:54,986 WARN  [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-08T07:55:55,486 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T07:55:55,492 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T07:55:55,493 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T07:55:55,493 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T07:55:55,493 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-08T07:55:55,494 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir/,AVAILABLE}
2024-12-08T07:55:55,495 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T07:55:55,589 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/java.io.tmpdir/jetty-localhost-38821-hadoop-hdfs-3_4_1-tests_jar-_-any-5865958393148807615/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T07:55:55,590 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:38821}
2024-12-08T07:55:55,590 INFO  [Time-limited test {}] server.Server(415): Started @3158ms
2024-12-08T07:55:55,634 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-08T07:55:55,731 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T07:55:55,738 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T07:55:55,739 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T07:55:55,739 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T07:55:55,740 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-08T07:55:55,741 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir/,AVAILABLE}
2024-12-08T07:55:55,741 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T07:55:55,856 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/java.io.tmpdir/jetty-localhost-43625-hadoop-hdfs-3_4_1-tests_jar-_-any-11709046193024429773/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T07:55:55,857 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:43625}
2024-12-08T07:55:55,857 INFO  [Time-limited test {}] server.Server(415): Started @3425ms
2024-12-08T07:55:55,860 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-08T07:55:56,862 WARN  [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data1/current/BP-1744500940-172.17.0.2-1733644554138/current, will proceed with Du for space computation calculation,
2024-12-08T07:55:56,862 WARN  [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data4/current/BP-1744500940-172.17.0.2-1733644554138/current, will proceed with Du for space computation calculation,
2024-12-08T07:55:56,862 WARN  [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data3/current/BP-1744500940-172.17.0.2-1733644554138/current, will proceed with Du for space computation calculation,
2024-12-08T07:55:56,862 WARN  [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data2/current/BP-1744500940-172.17.0.2-1733644554138/current, will proceed with Du for space computation calculation,
2024-12-08T07:55:56,896 WARN  [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-08T07:55:56,896 WARN  [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-08T07:55:56,940 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1122f839ca969d1a with lease ID 0x86d00f19a54f4b97: Processing first storage report for DS-86434993-08ea-4ba3-aaf9-a4c37db91028 from datanode DatanodeRegistration(127.0.0.1:45537, datanodeUuid=2aa0b056-fb3f-4c5b-9f33-7bfd6e8495c0, infoPort=35201, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138)
2024-12-08T07:55:56,941 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1122f839ca969d1a with lease ID 0x86d00f19a54f4b97: from storage DS-86434993-08ea-4ba3-aaf9-a4c37db91028 node DatanodeRegistration(127.0.0.1:45537, datanodeUuid=2aa0b056-fb3f-4c5b-9f33-7bfd6e8495c0, infoPort=35201, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-08T07:55:56,941 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c5ac826b3ba11d9 with lease ID 0x86d00f19a54f4b98: Processing first storage report for DS-82ec1916-54f0-417b-bc6d-58cccb8812eb from datanode DatanodeRegistration(127.0.0.1:33323, datanodeUuid=b0265732-bb00-45be-92ad-120975a0f459, infoPort=43545, infoSecurePort=0, ipcPort=37851, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138)
2024-12-08T07:55:56,941 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c5ac826b3ba11d9 with lease ID 0x86d00f19a54f4b98: from storage DS-82ec1916-54f0-417b-bc6d-58cccb8812eb node DatanodeRegistration(127.0.0.1:33323, datanodeUuid=b0265732-bb00-45be-92ad-120975a0f459, infoPort=43545, infoSecurePort=0, ipcPort=37851, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-08T07:55:56,941 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1122f839ca969d1a with lease ID 0x86d00f19a54f4b97: Processing first storage report for DS-bb934e41-e90b-4634-a4df-5d58ac5286c9 from datanode DatanodeRegistration(127.0.0.1:45537, datanodeUuid=2aa0b056-fb3f-4c5b-9f33-7bfd6e8495c0, infoPort=35201, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138)
2024-12-08T07:55:56,942 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1122f839ca969d1a with lease ID 0x86d00f19a54f4b97: from storage DS-bb934e41-e90b-4634-a4df-5d58ac5286c9 node DatanodeRegistration(127.0.0.1:45537, datanodeUuid=2aa0b056-fb3f-4c5b-9f33-7bfd6e8495c0, infoPort=35201, infoSecurePort=0, ipcPort=41301, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-08T07:55:56,942 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c5ac826b3ba11d9 with lease ID 0x86d00f19a54f4b98: Processing first storage report for DS-56a8f6b5-7a43-457d-af94-7cdeb565f7ee from datanode DatanodeRegistration(127.0.0.1:33323, datanodeUuid=b0265732-bb00-45be-92ad-120975a0f459, infoPort=43545, infoSecurePort=0, ipcPort=37851, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138)
2024-12-08T07:55:56,942 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c5ac826b3ba11d9 with lease ID 0x86d00f19a54f4b98: from storage DS-56a8f6b5-7a43-457d-af94-7cdeb565f7ee node DatanodeRegistration(127.0.0.1:33323, datanodeUuid=b0265732-bb00-45be-92ad-120975a0f459, infoPort=43545, infoSecurePort=0, ipcPort=37851, storageInfo=lv=-57;cid=testClusterID;nsid=1307492006;c=1733644554138), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-08T07:55:56,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931
2024-12-08T07:55:57,025 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/zookeeper_0, clientPort=55683, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-08T07:55:57,034 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55683
2024-12-08T07:55:57,043 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:57,045 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:57,249 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741825_1001 (size=7)
2024-12-08T07:55:57,251 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741825_1001 (size=7)
2024-12-08T07:55:57,665 INFO  [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301 with version=8
2024-12-08T07:55:57,666 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging
2024-12-08T07:55:57,735 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-08T07:55:57,978 INFO  [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45
2024-12-08T07:55:57,986 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T07:55:57,987 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T07:55:57,991 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T07:55:57,991 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T07:55:57,992 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T07:55:58,122 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-08T07:55:58,171 INFO  [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-08T07:55:58,178 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-08T07:55:58,182 INFO  [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-08T07:55:58,203 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 87968 (auto-detected)
2024-12-08T07:55:58,204 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-08T07:55:58,222 INFO  [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44675
2024-12-08T07:55:58,241 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44675 connecting to ZooKeeper ensemble=127.0.0.1:55683
2024-12-08T07:55:58,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446750x0, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-08T07:55:58,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44675-0x100046ca1990000 connected
2024-12-08T07:55:58,461 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:58,468 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:58,479 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T07:55:58,483 INFO  [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301, hbase.cluster.distributed=false
2024-12-08T07:55:58,503 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-08T07:55:58,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44675
2024-12-08T07:55:58,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44675
2024-12-08T07:55:58,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44675
2024-12-08T07:55:58,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44675
2024-12-08T07:55:58,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44675
2024-12-08T07:55:58,599 INFO  [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45
2024-12-08T07:55:58,600 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T07:55:58,600 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T07:55:58,601 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T07:55:58,601 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T07:55:58,601 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T07:55:58,603 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-08T07:55:58,605 INFO  [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-08T07:55:58,606 INFO  [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37793
2024-12-08T07:55:58,608 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37793 connecting to ZooKeeper ensemble=127.0.0.1:55683
2024-12-08T07:55:58,609 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:58,612 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:58,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377930x0, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-08T07:55:58,627 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:377930x0, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T07:55:58,628 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37793-0x100046ca1990001 connected
2024-12-08T07:55:58,631 INFO  [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-08T07:55:58,639 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-08T07:55:58,642 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T07:55:58,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-08T07:55:58,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37793
2024-12-08T07:55:58,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37793
2024-12-08T07:55:58,651 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37793
2024-12-08T07:55:58,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37793
2024-12-08T07:55:58,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37793
2024-12-08T07:55:58,670 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:44675
2024-12-08T07:55:58,671 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,44675,1733644557832
2024-12-08T07:55:58,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T07:55:58,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T07:55:58,681 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0106a245d0e8,44675,1733644557832
2024-12-08T07:55:58,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-08T07:55:58,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T07:55:58,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T07:55:58,711 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-08T07:55:58,713 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,44675,1733644557832 from backup master directory
2024-12-08T07:55:58,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T07:55:58,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0106a245d0e8,44675,1733644557832
2024-12-08T07:55:58,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T07:55:58,722 WARN  [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T07:55:58,722 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,44675,1733644557832
2024-12-08T07:55:58,724 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-08T07:55:58,726 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-08T07:55:58,776 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase.id] with ID: e18b7115-33a7-4576-bef7-2d76d2f89a3a
2024-12-08T07:55:58,776 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/.tmp/hbase.id
2024-12-08T07:55:58,788 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741826_1002 (size=42)
2024-12-08T07:55:58,788 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741826_1002 (size=42)
2024-12-08T07:55:58,789 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/.tmp/hbase.id]:[hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase.id]
2024-12-08T07:55:58,829 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T07:55:58,833 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-08T07:55:58,849 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 14ms.
2024-12-08T07:55:58,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T07:55:58,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T07:55:58,876 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741827_1003 (size=196)
2024-12-08T07:55:58,877 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741827_1003 (size=196)
2024-12-08T07:55:58,892 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T07:55:58,895 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-08T07:55:58,902 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T07:55:58,934 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741828_1004 (size=1189)
2024-12-08T07:55:58,935 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741828_1004 (size=1189)
2024-12-08T07:55:58,951 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store
2024-12-08T07:55:58,970 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741829_1005 (size=34)
2024-12-08T07:55:58,971 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741829_1005 (size=34)
2024-12-08T07:55:58,975 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-08T07:55:58,979 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T07:55:58,981 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-08T07:55:58,981 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T07:55:58,981 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T07:55:58,983 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-08T07:55:58,983 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T07:55:58,984 INFO  [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T07:55:58,985 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644558981Disabling compacts and flushes for region at 1733644558981Disabling writes for close at 1733644558983 (+2 ms)Writing region close event to WAL at 1733644558983Closed at 1733644558983 2024-12-08T07:55:58,987 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/.initializing 2024-12-08T07:55:58,987 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/WALs/0106a245d0e8,44675,1733644557832 2024-12-08T07:55:59,007 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C44675%2C1733644557832, suffix=, logDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/WALs/0106a245d0e8,44675,1733644557832, archiveDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/oldWALs, maxLogs=10 2024-12-08T07:55:59,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C44675%2C1733644557832.1733644559012 2024-12-08T07:55:59,032 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/WALs/0106a245d0e8,44675,1733644557832/0106a245d0e8%2C44675%2C1733644557832.1733644559012 2024-12-08T07:55:59,040 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35201:35201),(127.0.0.1/127.0.0.1:43545:43545)] 2024-12-08T07:55:59,041 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:55:59,041 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:55:59,044 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,045 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T07:55:59,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:55:59,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T07:55:59,110 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:55:59,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T07:55:59,114 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:55:59,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T07:55:59,118 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:55:59,119 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,123 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,124 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,129 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,129 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,132 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T07:55:59,136 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:55:59,140 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:55:59,141 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843592, jitterRate=0.07268291711807251}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T07:55:59,146 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644559057Initializing all the Stores at 1733644559059 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644559060 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644559060Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644559061 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644559061Cleaning up temporary data from old regions at 1733644559129 (+68 ms)Region opened successfully at 1733644559146 (+17 ms) 2024-12-08T07:55:59,148 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T07:55:59,181 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ed6882e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:55:59,210 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T07:55:59,220 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T07:55:59,220 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T07:55:59,223 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T07:55:59,225 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T07:55:59,229 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-08T07:55:59,229 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T07:55:59,252 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T07:55:59,260 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T07:55:59,300 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T07:55:59,303 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T07:55:59,305 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T07:55:59,310 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T07:55:59,313 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T07:55:59,316 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T07:55:59,321 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T07:55:59,323 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T07:55:59,331 
DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T07:55:59,347 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T07:55:59,352 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T07:55:59,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:55:59,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:55:59,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:55:59,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:55:59,367 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,44675,1733644557832, sessionid=0x100046ca1990000, setting cluster-up flag (Was=false) 2024-12-08T07:55:59,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:55:59,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:55:59,427 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T07:55:59,433 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,44675,1733644557832 2024-12-08T07:55:59,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:55:59,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:55:59,490 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T07:55:59,494 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,44675,1733644557832 2024-12-08T07:55:59,503 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T07:55:59,557 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(746): ClusterId : e18b7115-33a7-4576-bef7-2d76d2f89a3a 2024-12-08T07:55:59,559 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T07:55:59,565 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T07:55:59,573 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T07:55:59,576 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T07:55:59,577 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T07:55:59,579 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
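The interleaved ZKWatcher lines above (Received ZooKeeper Event, type=NodeChildrenChanged / NodeCreated under baseZNode=/hbase) come from watches the master and region server keep on the cluster znodes. A minimal sketch with the plain ZooKeeper client, assuming the quorum address from this log, of what such a child watch looks like; HBase itself goes through its ZKWatcher/RecoverableZooKeeper wrappers rather than this raw API.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class BaseZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address and base znode copied from the log above; they change per run.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55683", 30_000, event ->
        // Fires for events like NodeChildrenChanged or NodeCreated,
        // matching the ZKWatcher(609) lines in this log.
        System.out.println("event=" + event.getType() + " path=" + event.getPath()));
    // Register a child watch on /hbase. ZooKeeper watches are one-shot, so a
    // real client re-registers after each trigger (ZKWatcher handles that).
    List<String> children = zk.getChildren("/hbase", true);
    System.out.println("children of /hbase: " + children);
    Thread.sleep(5_000);  // give the watcher a moment to fire
    zk.close();
  }
}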
2024-12-08T07:55:59,586 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T07:55:59,586 DEBUG [RS:0;0106a245d0e8:37793 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68a13b30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:55:59,584 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,44675,1733644557832 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T07:55:59,592 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:55:59,592 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:55:59,592 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:55:59,592 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:55:59,592 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T07:55:59,593 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,593 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:55:59,593 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,594 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644589594 2024-12-08T07:55:59,596 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T07:55:59,597 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T07:55:59,597 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:55:59,598 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 
2024-12-08T07:55:59,601 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T07:55:59,601 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T07:55:59,602 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T07:55:59,602 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T07:55:59,602 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:37793 2024-12-08T07:55:59,603 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,603 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T07:55:59,603 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,606 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T07:55:59,606 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T07:55:59,606 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T07:55:59,606 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(832): About to register with Master. 
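The FSTableDescriptors entry above writes the hbase:meta descriptor with coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', and the region server reports that system and table coprocessor loading is enabled. For a user table the same attribute can be set through the descriptor builder; the sketch below uses a hypothetical table name 'demo_table' and only illustrates the mechanism, since hbase:meta gets this endpoint wired in by the master itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical table; attaches the same endpoint class seen in the log.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(td);
    }
  }
}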
2024-12-08T07:55:59,607 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T07:55:59,608 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T07:55:59,609 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,44675,1733644557832 with port=37793, startcode=1733644558567 2024-12-08T07:55:59,610 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T07:55:59,610 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T07:55:59,612 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644559612,5,FailOnTimeoutGroup] 2024-12-08T07:55:59,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:55:59,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:55:59,614 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644559613,5,FailOnTimeoutGroup] 2024-12-08T07:55:59,614 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,614 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T07:55:59,615 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T07:55:59,616 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
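Many of the surrounding entries ("Chore ScheduledChore name=LogsCleaner/HFileCleaner/ReplicationBarrierCleaner ... is enabled") are periodic tasks scheduled on the master's ChoreService. A minimal sketch of a custom chore using the same mechanism; the names HelloChore and SimpleStopper are made up for illustration and the one-second period is arbitrary.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  /** Minimal Stoppable so the chore has something to check for shutdown. */
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  /** Runs periodically, in the same way as the cleaner chores above. */
  static final class HelloChore extends ScheduledChore {
    HelloChore(Stoppable stopper) {
      super("HelloChore", stopper, 1, 0, TimeUnit.SECONDS);
    }
    @Override protected void chore() {
      System.out.println("chore tick at " + System.currentTimeMillis());
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleStopper stopper = new SimpleStopper();
    ChoreService service = new ChoreService("sketch");
    service.scheduleChore(new HelloChore(stopper));
    Thread.sleep(3_000);
    stopper.stop("done");
    service.shutdown();
  }
}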
2024-12-08T07:55:59,616 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301 2024-12-08T07:55:59,616 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T07:55:59,621 DEBUG [RS:0;0106a245d0e8:37793 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T07:55:59,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:55:59,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:55:59,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:55:59,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:55:59,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:55:59,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:55:59,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:55:59,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:55:59,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:55:59,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:55:59,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:55:59,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:55:59,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:55:59,645 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:55:59,645 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:55:59,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:55:59,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-12-08T07:55:59,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740 2024-12-08T07:55:59,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740 2024-12-08T07:55:59,653 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:55:59,653 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:55:59,654 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T07:55:59,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:55:59,663 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:55:59,664 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742141, jitterRate=-0.05631956458091736}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:55:59,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644559629Initializing all the Stores at 1733644559631 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644559631Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644559631Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644559631Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644559631Cleaning up temporary data from old regions at 1733644559653 (+22 ms)Region opened successfully at 1733644559667 (+14 ms) 2024-12-08T07:55:59,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 
1588230740, disabling compactions & flushes 2024-12-08T07:55:59,667 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:55:59,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:55:59,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:55:59,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:55:59,669 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:55:59,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644559667Disabling compacts and flushes for region at 1733644559667Disabling writes for close at 1733644559668 (+1 ms)Writing region close event to WAL at 1733644559669 (+1 ms)Closed at 1733644559669 2024-12-08T07:55:59,672 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:55:59,672 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T07:55:59,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T07:55:59,685 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42493, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T07:55:59,689 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:55:59,692 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T07:55:59,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44675 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,37793,1733644558567 2024-12-08T07:55:59,695 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44675 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,37793,1733644558567 2024-12-08T07:55:59,708 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301 2024-12-08T07:55:59,708 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35577 2024-12-08T07:55:59,708 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T07:55:59,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:55:59,722 DEBUG 
[RS:0;0106a245d0e8:37793 {}] zookeeper.ZKUtil(111): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,37793,1733644558567 2024-12-08T07:55:59,722 WARN [RS:0;0106a245d0e8:37793 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T07:55:59,722 INFO [RS:0;0106a245d0e8:37793 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:55:59,722 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567 2024-12-08T07:55:59,725 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,37793,1733644558567] 2024-12-08T07:55:59,746 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T07:55:59,756 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T07:55:59,761 INFO [RS:0;0106a245d0e8:37793 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T07:55:59,761 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,762 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T07:55:59,767 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T07:55:59,768 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
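The MemStoreFlusher and PressureAwareCompactionThroughputController lines above are driven by configuration rather than code. A hedged sketch of the corresponding knobs set programmatically, mirroring the logged 128 MB flush size and 100/50 MB-per-second throughput bounds; the key names are written from memory and should be checked against hbase-default.xml for the release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreCompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores; the
    // "globalMemStoreLimit=880 M" figure above is derived from this fraction.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Per-region memstore flush threshold (128 MB matches the logged flushSize).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Compaction throughput bounds read by PressureAwareCompactionThroughputController.
    // Key names assumed from recent releases; verify before relying on them.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}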
2024-12-08T07:55:59,769 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,769 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,769 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,769 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,769 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,769 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:55:59,770 DEBUG [RS:0;0106a245d0e8:37793 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:55:59,771 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,772 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,772 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,772 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-08T07:55:59,772 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,772 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,37793,1733644558567-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:55:59,788 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T07:55:59,789 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,37793,1733644558567-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,790 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,790 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.Replication(171): 0106a245d0e8,37793,1733644558567 started 2024-12-08T07:55:59,805 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:55:59,805 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,37793,1733644558567, RpcServer on 0106a245d0e8/172.17.0.2:37793, sessionid=0x100046ca1990001 2024-12-08T07:55:59,806 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T07:55:59,806 DEBUG [RS:0;0106a245d0e8:37793 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,37793,1733644558567 2024-12-08T07:55:59,807 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,37793,1733644558567' 2024-12-08T07:55:59,807 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T07:55:59,808 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T07:55:59,808 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T07:55:59,808 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T07:55:59,809 DEBUG [RS:0;0106a245d0e8:37793 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,37793,1733644558567 2024-12-08T07:55:59,809 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,37793,1733644558567' 2024-12-08T07:55:59,809 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T07:55:59,810 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T07:55:59,810 DEBUG [RS:0;0106a245d0e8:37793 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T07:55:59,810 INFO [RS:0;0106a245d0e8:37793 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T07:55:59,811 INFO [RS:0;0106a245d0e8:37793 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-08T07:55:59,842 WARN [0106a245d0e8:44675 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T07:55:59,922 INFO [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C37793%2C1733644558567, suffix=, logDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567, archiveDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs, maxLogs=32 2024-12-08T07:55:59,925 INFO [RS:0;0106a245d0e8:37793 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644559924 2024-12-08T07:55:59,933 INFO [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644559924 2024-12-08T07:55:59,934 DEBUG [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43545:43545),(127.0.0.1/127.0.0.1:35201:35201)] 2024-12-08T07:56:00,097 DEBUG [0106a245d0e8:44675 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T07:56:00,109 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,37793,1733644558567 2024-12-08T07:56:00,114 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,37793,1733644558567, state=OPENING 2024-12-08T07:56:00,121 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T07:56:00,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:56:00,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:56:00,133 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:56:00,133 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:56:00,135 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:56:00,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,37793,1733644558567}] 2024-12-08T07:56:00,317 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T07:56:00,320 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39131, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T07:56:00,332 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T07:56:00,332 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:56:00,336 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C37793%2C1733644558567.meta, suffix=.meta, logDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567, archiveDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs, maxLogs=32 2024-12-08T07:56:00,338 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.meta.1733644560338.meta 2024-12-08T07:56:00,346 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.meta.1733644560338.meta 2024-12-08T07:56:00,347 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35201:35201),(127.0.0.1/127.0.0.1:43545:43545)] 2024-12-08T07:56:00,349 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:56:00,351 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T07:56:00,353 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T07:56:00,357 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
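The two "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above come from AbstractFSWAL as the region server and then the meta region create their write-ahead logs. As a hedged illustration (not part of this log), the sketch below shows the configuration keys commonly associated with those values; the exact key names are an assumption from general HBase knowledge and should be checked against this 4.0.0-alpha-1-SNAPSHOT build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 256 MB WAL block size (matches "blocksize=256 MB" in the log)
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // roll threshold = blocksize * multiplier; 0.5 yields the 128 MB rollsize
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // upper bound on retained WAL files before flushes are forced (maxLogs=32)
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println("maxlogs=" + conf.get("hbase.regionserver.maxlogs"));
      }
    }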
2024-12-08T07:56:00,361 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T07:56:00,362 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:56:00,362 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T07:56:00,362 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T07:56:00,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:56:00,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:56:00,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:56:00,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:56:00,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:56:00,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:56:00,370 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:56:00,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:56:00,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:56:00,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:56:00,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:56:00,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:56:00,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:56:00,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:56:00,375 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:56:00,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
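Each CompactionConfiguration entry above prints the effective compaction tuning for one hbase:meta column family (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). As a hedged sketch, these values map onto the standard hbase.hstore.compaction.* keys; the key names are stated from general HBase knowledge rather than from this log, so verify them for this build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }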
2024-12-08T07:56:00,376 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:56:00,377 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740 2024-12-08T07:56:00,380 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740 2024-12-08T07:56:00,382 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:56:00,383 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:56:00,383 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T07:56:00,386 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:56:00,387 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730425, jitterRate=-0.07121708989143372}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:56:00,388 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T07:56:00,389 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644560363Writing region info on filesystem at 1733644560363Initializing all the Stores at 1733644560365 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644560365Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644560365Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644560365Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644560365Cleaning up temporary data from old regions at 1733644560383 (+18 ms)Running coprocessor post-open hooks at 1733644560388 (+5 ms)Region opened successfully at 1733644560389 (+1 ms) 2024-12-08T07:56:00,395 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644560308 2024-12-08T07:56:00,406 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T07:56:00,406 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T07:56:00,408 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,37793,1733644558567 2024-12-08T07:56:00,410 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,37793,1733644558567, state=OPEN 2024-12-08T07:56:00,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:56:00,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:56:00,491 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:56:00,491 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:56:00,491 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,37793,1733644558567 2024-12-08T07:56:00,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T07:56:00,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,37793,1733644558567 in 354 msec 2024-12-08T07:56:00,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T07:56:00,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 822 msec 2024-12-08T07:56:00,508 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:56:00,508 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T07:56:00,529 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:56:00,530 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,37793,1733644558567, seqNum=-1] 2024-12-08T07:56:00,547 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:56:00,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34565, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:56:00,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0390 sec 2024-12-08T07:56:00,569 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644560569, completionTime=-1 2024-12-08T07:56:00,571 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T07:56:00,571 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T07:56:00,598 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T07:56:00,598 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644620598 2024-12-08T07:56:00,598 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733644680598 2024-12-08T07:56:00,598 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-12-08T07:56:00,600 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,44675,1733644557832-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:56:00,601 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,44675,1733644557832-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:56:00,601 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,44675,1733644557832-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:56:00,602 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:44675, period=300000, unit=MILLISECONDS is enabled. 
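InitMetaProcedure above reports creating the {NAME => 'default'} and {NAME => 'hbase'} namespaces as part of meta initialization. For comparison only, a minimal client-side equivalent using the public Admin API might look like the sketch below; the connection boilerplate and the namespace name "example_ns" are assumptions, not taken from this log, and 'default'/'hbase' themselves are built in rather than created by clients.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // user namespaces are created like this; the reserved ones are made internally
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }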
2024-12-08T07:56:00,603 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T07:56:00,603 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T07:56:00,609 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T07:56:00,628 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.906sec 2024-12-08T07:56:00,629 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T07:56:00,630 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T07:56:00,631 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T07:56:00,632 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T07:56:00,632 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T07:56:00,633 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,44675,1733644557832-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:56:00,633 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,44675,1733644557832-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T07:56:00,641 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T07:56:00,642 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T07:56:00,642 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,44675,1733644557832-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
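The ChoreService entries above (BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, and so on) all follow one pattern: a named ScheduledChore with a period, registered with a ChoreService. The sketch below illustrates that pattern with a made-up chore; ScheduledChore and ChoreService are internal HBase classes, so the exact constructor signatures here are an assumption.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {          // trivial stop flag for the demo
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
          @Override protected void chore() {
            // periodic work goes here, analogous to what BalancerChore does each period
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);  // emits a "Chore ScheduledChore ... is enabled." line
        service.shutdown();
      }
    }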
2024-12-08T07:56:00,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:56:00,686 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T07:56:00,686 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T07:56:00,689 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,44675,-1 for getting cluster id 2024-12-08T07:56:00,692 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T07:56:00,700 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e18b7115-33a7-4576-bef7-2d76d2f89a3a' 2024-12-08T07:56:00,706 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T07:56:00,706 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e18b7115-33a7-4576-bef7-2d76d2f89a3a" 2024-12-08T07:56:00,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36b647da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:56:00,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,44675,-1] 2024-12-08T07:56:00,709 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T07:56:00,711 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:56:00,713 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T07:56:00,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:56:00,716 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:56:00,723 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,37793,1733644558567, seqNum=-1] 2024-12-08T07:56:00,724 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:56:00,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:56:00,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=0106a245d0e8,44675,1733644557832 2024-12-08T07:56:00,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:56:00,751 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T07:56:00,755 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T07:56:00,759 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 0106a245d0e8,44675,1733644557832 2024-12-08T07:56:00,761 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2081a17b 2024-12-08T07:56:00,762 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T07:56:00,764 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T07:56:00,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T07:56:00,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
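The two TableDescriptorChecker warnings above fire because the test deliberately shrinks "hbase.hregion.max.filesize" to 786432 and "hbase.hregion.memstore.flush.size" to 8192 so that flushing and log rolling happen quickly. A hedged sketch of how a test harness might set those values is below; the sanity-check toggle key is quoted from general HBase knowledge, not from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionTestConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // tiny max file size -> frequent splits
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // tiny flush size -> frequent flushes
        // without this, TableDescriptorChecker may reject such extreme values outright
        conf.setBoolean("hbase.table.sanity.checks", false);
        System.out.println(conf.get("hbase.hregion.max.filesize"));
      }
    }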
2024-12-08T07:56:00,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:56:00,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-08T07:56:00,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T07:56:00,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-08T07:56:00,781 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:56:00,783 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T07:56:00,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:56:00,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741835_1011 (size=389) 2024-12-08T07:56:00,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741835_1011 (size=389) 2024-12-08T07:56:00,817 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 54f94134ed484eb6760ede13ef02eff1, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301 2024-12-08T07:56:00,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741836_1012 (size=72) 2024-12-08T07:56:00,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741836_1012 (size=72) 2024-12-08T07:56:00,830 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:56:00,830 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 54f94134ed484eb6760ede13ef02eff1, disabling compactions & flushes 2024-12-08T07:56:00,830 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:56:00,830 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:56:00,830 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. after waiting 0 ms 2024-12-08T07:56:00,830 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:56:00,830 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:56:00,830 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 54f94134ed484eb6760ede13ef02eff1: Waiting for close lock at 1733644560830Disabling compacts and flushes for region at 1733644560830Disabling writes for close at 1733644560830Writing region close event to WAL at 1733644560830Closed at 1733644560830 2024-12-08T07:56:00,832 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T07:56:00,837 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733644560833"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644560833"}]},"ts":"1733644560833"} 2024-12-08T07:56:00,842 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
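The create request logged by HMaster above carries a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)' and COMPRESSION => 'NONE'. A minimal client-side sketch that would produce an equivalent descriptor with the public API is shown below; the connection boilerplate is assumed rather than taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                              // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)              // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)                        // BLOCKSIZE => 64KB
              .setCompressionType(Compression.Algorithm.NONE) // COMPRESSION => 'NONE'
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(info)
              .build();
          admin.createTable(td);  // drives the CreateTableProcedure steps logged above
        }
      }
    }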
2024-12-08T07:56:00,844 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T07:56:00,846 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644560844"}]},"ts":"1733644560844"} 2024-12-08T07:56:00,850 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-08T07:56:00,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=54f94134ed484eb6760ede13ef02eff1, ASSIGN}] 2024-12-08T07:56:00,855 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=54f94134ed484eb6760ede13ef02eff1, ASSIGN 2024-12-08T07:56:00,856 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=54f94134ed484eb6760ede13ef02eff1, ASSIGN; state=OFFLINE, location=0106a245d0e8,37793,1733644558567; forceNewPlan=false, retain=false 2024-12-08T07:56:01,009 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=54f94134ed484eb6760ede13ef02eff1, regionState=OPENING, regionLocation=0106a245d0e8,37793,1733644558567 2024-12-08T07:56:01,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=54f94134ed484eb6760ede13ef02eff1, ASSIGN because future has completed 2024-12-08T07:56:01,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54f94134ed484eb6760ede13ef02eff1, server=0106a245d0e8,37793,1733644558567}] 2024-12-08T07:56:01,187 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 
2024-12-08T07:56:01,188 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 54f94134ed484eb6760ede13ef02eff1, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:56:01,189 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,189 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:56:01,189 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,189 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,191 INFO [StoreOpener-54f94134ed484eb6760ede13ef02eff1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,194 INFO [StoreOpener-54f94134ed484eb6760ede13ef02eff1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54f94134ed484eb6760ede13ef02eff1 columnFamilyName info 2024-12-08T07:56:01,194 DEBUG [StoreOpener-54f94134ed484eb6760ede13ef02eff1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:56:01,195 INFO [StoreOpener-54f94134ed484eb6760ede13ef02eff1-1 {}] regionserver.HStore(327): Store=54f94134ed484eb6760ede13ef02eff1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:56:01,196 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,197 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,198 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,198 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,198 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,201 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,204 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:56:01,205 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 54f94134ed484eb6760ede13ef02eff1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740744, jitterRate=-0.05809643864631653}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T07:56:01,205 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:01,206 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 54f94134ed484eb6760ede13ef02eff1: Running coprocessor pre-open hook at 1733644561189Writing region info on filesystem at 1733644561189Initializing all the Stores at 1733644561191 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644561191Cleaning up temporary data from old regions at 1733644561198 (+7 ms)Running coprocessor post-open hooks at 1733644561205 (+7 ms)Region opened successfully at 1733644561206 (+1 ms) 2024-12-08T07:56:01,208 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1., pid=6, masterSystemTime=1733644561176 2024-12-08T07:56:01,212 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:56:01,212 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:56:01,213 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=54f94134ed484eb6760ede13ef02eff1, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,37793,1733644558567 2024-12-08T07:56:01,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54f94134ed484eb6760ede13ef02eff1, server=0106a245d0e8,37793,1733644558567 because future has completed 2024-12-08T07:56:01,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T07:56:01,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 54f94134ed484eb6760ede13ef02eff1, server=0106a245d0e8,37793,1733644558567 in 199 msec 2024-12-08T07:56:01,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T07:56:01,229 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=54f94134ed484eb6760ede13ef02eff1, ASSIGN in 372 msec 2024-12-08T07:56:01,230 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T07:56:01,230 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644561230"}]},"ts":"1733644561230"} 2024-12-08T07:56:01,234 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-08T07:56:01,235 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T07:56:01,238 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 464 msec 2024-12-08T07:56:05,866 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T07:56:05,927 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T07:56:05,928 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-08T07:56:08,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T07:56:08,170 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T07:56:08,173 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-08T07:56:08,173 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T07:56:08,175 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:56:08,175 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T07:56:08,176 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T07:56:08,176 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T07:56:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:56:10,824 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-08T07:56:10,830 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-08T07:56:10,837 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-08T07:56:10,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 
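HBaseTestingUtil above scans hbase:meta, finds one region for the new table, and records its firstRegionName. Outside the test utility, a hedged equivalent with the public client API is RegionLocator, sketched below; everything other than the table name is illustrative.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(tn)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // region name plus hosting server, analogous to firstRegionName above
            System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
          }
        }
      }
    }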
2024-12-08T07:56:10,838 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644570838 2024-12-08T07:56:10,851 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:10,851 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:10,851 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:10,851 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:10,851 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:10,852 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644559924 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644570838 2024-12-08T07:56:10,853 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43545:43545),(127.0.0.1/127.0.0.1:35201:35201)] 2024-12-08T07:56:10,853 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644559924 is not closed yet, will try archiving it next time 2024-12-08T07:56:10,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741833_1009 (size=451) 2024-12-08T07:56:10,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741833_1009 (size=451) 2024-12-08T07:56:10,856 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644559924 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644559924 2024-12-08T07:56:10,862 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1., hostname=0106a245d0e8,37793,1733644558567, seqNum=2] 2024-12-08T07:56:22,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] regionserver.HRegion(8855): Flush requested on 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:22,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 54f94134ed484eb6760ede13ef02eff1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T07:56:23,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/c00040648dbd4a9aa349ae0d07a44f36 is 1080, key is row0001/info:/1733644570865/Put/seqid=0 2024-12-08T07:56:23,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741838_1014 (size=12509) 2024-12-08T07:56:23,036 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741838_1014 (size=12509) 2024-12-08T07:56:23,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/c00040648dbd4a9aa349ae0d07a44f36 2024-12-08T07:56:23,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/c00040648dbd4a9aa349ae0d07a44f36 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36 2024-12-08T07:56:23,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36, entries=7, sequenceid=11, filesize=12.2 K 2024-12-08T07:56:23,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 54f94134ed484eb6760ede13ef02eff1 in 137ms, sequenceid=11, compaction requested=false 2024-12-08T07:56:23,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 54f94134ed484eb6760ede13ef02eff1: 2024-12-08T07:56:26,962 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
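
The flush recorded just above follows a write-then-commit pattern: the memstore contents are written to a temporary HFile under .tmp/ ("Flushed memstore data size=7.36 KB ... to=.../.tmp/info/c00040648dbd4a9aa349ae0d07a44f36") and that file is then moved into the column-family directory ("Committing ... as .../info/c00040648dbd4a9aa349ae0d07a44f36"). The sizes are plain byte counts rendered with two decimals, so 7532 bytes appears as ~7.36 KB and 8304 bytes as ~8.11 KB. The minimal Java sketch below is illustrative only; it is not HBase code, the class name and local file layout are invented, and only the byte count and file name are taken from the log. It simply mimics the write-then-move step and the size formatting.

    import java.io.IOException;
    import java.nio.file.*;

    // Illustrative sketch of the flush pattern seen in the log above:
    // write the flushed data to a temporary location, then move it into place.
    // Not HBase's implementation; the size and file name come from the log.
    public class FlushCommitSketch {
        // Renders a byte count the way the log does, e.g. 7532 -> "7.36 KB".
        static String human(long bytes) {
            return String.format("%.2f KB", bytes / 1024.0);
        }

        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("info");
            Path tmp = storeDir.resolve("tmp-c00040648dbd4a9aa349ae0d07a44f36");
            Path committed = storeDir.resolve("c00040648dbd4a9aa349ae0d07a44f36");

            byte[] flushed = new byte[7532];              // dataSize reported by the flush
            Files.write(tmp, flushed);                    // "Flushed memstore data ... to=.tmp/..."
            Files.move(tmp, committed,                    // "Committing .tmp/... as info/..."
                    StandardCopyOption.ATOMIC_MOVE);

            System.out.println("Finished flush of dataSize ~" + human(flushed.length)
                    + "/" + flushed.length + " to " + committed);
        }
    }

Running it prints "Finished flush of dataSize ~7.36 KB/7532 ...", matching the arithmetic in the "Finished flush" line above.
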
2024-12-08T07:56:31,001 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644591000 2024-12-08T07:56:31,215 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:31,215 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:31,215 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:31,216 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:31,216 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:31,216 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:31,216 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644570838 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644591000 2024-12-08T07:56:31,217 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35201:35201),(127.0.0.1/127.0.0.1:43545:43545)] 2024-12-08T07:56:31,217 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644570838 is not closed yet, will try archiving it next time 2024-12-08T07:56:31,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741837_1013 (size=12399) 2024-12-08T07:56:31,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741837_1013 (size=12399) 2024-12-08T07:56:31,422 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:33,628 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:35,832 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:38,037 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:38,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] regionserver.HRegion(8855): Flush requested on 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:56:38,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 54f94134ed484eb6760ede13ef02eff1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T07:56:38,242 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:38,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/07cb2a3f74064a5b994b35ec8d88dea0 is 1080, key is row0008/info:/1733644584960/Put/seqid=0 2024-12-08T07:56:38,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741840_1016 (size=12509) 2024-12-08T07:56:38,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741840_1016 (size=12509) 2024-12-08T07:56:38,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/07cb2a3f74064a5b994b35ec8d88dea0 2024-12-08T07:56:38,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/07cb2a3f74064a5b994b35ec8d88dea0 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/07cb2a3f74064a5b994b35ec8d88dea0 2024-12-08T07:56:38,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/07cb2a3f74064a5b994b35ec8d88dea0, entries=7, sequenceid=21, filesize=12.2 K 2024-12-08T07:56:38,494 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:38,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 54f94134ed484eb6760ede13ef02eff1 in 
456ms, sequenceid=21, compaction requested=false 2024-12-08T07:56:38,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 54f94134ed484eb6760ede13ef02eff1: 2024-12-08T07:56:38,495 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-08T07:56:38,495 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:56:38,495 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36 because midkey is the same as first or last row 2024-12-08T07:56:40,241 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:40,644 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T07:56:40,645 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-08T07:56:42,445 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:42,447 WARN [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:42,449 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C37793%2C1733644558567:(num 1733644591000) roll requested 2024-12-08T07:56:42,450 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644602450 2024-12-08T07:56:42,662 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK]] 2024-12-08T07:56:42,663 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:42,663 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:42,663 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:42,663 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:42,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
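
The run of "Slow sync cost: ~200 ms" entries above, followed by "Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5", shows the two roll triggers as they appear in this log: a roll is requested once too many moderately slow syncs accumulate, and (as the later "time=5008 ms, threshold=5000 ms" warnings show) immediately when a single sync exceeds a hard limit. The Java sketch below is a simplified illustration of that decision, not FSHLog's actual code: the 5000 ms and count-of-5 thresholds are copied from the log, while the 100 ms lower bound and the bookkeeping are assumptions (the real WAL evaluates the slow-sync count over a time window, which is why this log can report count=8 against threshold=5).

    // Simplified illustration of the two roll triggers visible in this log:
    // many moderately slow syncs, or one sync over a hard time limit.
    // Not HBase's FSHLog; the thresholds mirror the values logged above.
    public class SlowSyncRollSketch {
        static final long SLOW_SYNC_MS = 100;      // assumed bound for the "Slow sync cost" INFO lines
        static final long ROLL_ON_SYNC_MS = 5000;  // "threshold=5000 ms" in the WARN entries
        static final int  ROLL_ON_COUNT = 5;       // "threshold=5" in the count-based WARN entry

        int slowSyncCount;

        boolean onSyncCompleted(long syncMillis) {
            if (syncMillis >= ROLL_ON_SYNC_MS) {
                System.out.println("Requesting log roll: time=" + syncMillis + " ms");
                return true;                       // one very slow sync forces a roll
            }
            if (syncMillis >= SLOW_SYNC_MS && ++slowSyncCount > ROLL_ON_COUNT) {
                System.out.println("Requesting log roll: count=" + slowSyncCount);
                return true;                       // too many moderately slow syncs
            }
            return false;
        }

        public static void main(String[] args) {
            SlowSyncRollSketch wal = new SlowSyncRollSketch();
            long[] costs = {210, 202, 201, 201, 201, 202, 201, 200}; // values reported above
            for (long c : costs) {
                if (wal.onSyncCompleted(c)) {
                    break;
                }
            }
        }
    }
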
2024-12-08T07:56:42,663 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644591000 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644602450 2024-12-08T07:56:42,664 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43545:43545),(127.0.0.1/127.0.0.1:35201:35201)] 2024-12-08T07:56:42,664 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644591000 is not closed yet, will try archiving it next time 2024-12-08T07:56:42,665 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644570838 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644570838 2024-12-08T07:56:42,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741839_1015 (size=7739) 2024-12-08T07:56:42,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741839_1015 (size=7739) 2024-12-08T07:56:44,651 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:46,189 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 54f94134ed484eb6760ede13ef02eff1, had cached 0 bytes from a total of 25018 2024-12-08T07:56:46,855 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:49,059 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:51,263 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:53,264 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T07:56:53,265 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644613264 2024-12-08T07:56:56,962 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T07:56:58,275 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:58,280 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:56:58,280 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C37793%2C1733644558567:(num 1733644613264) roll requested 2024-12-08T07:56:58,280 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:58,281 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:58,281 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:58,281 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:58,281 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:56:58,282 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644602450 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644613264 2024-12-08T07:56:58,284 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43545:43545),(127.0.0.1/127.0.0.1:35201:35201)] 2024-12-08T07:56:58,284 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644602450 is not closed yet, will try archiving it next time 2024-12-08T07:56:58,285 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644618285 2024-12-08T07:56:58,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741841_1017 (size=4753) 2024-12-08T07:56:58,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741841_1017 (size=4753) 2024-12-08T07:57:03,288 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:03,288 WARN [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:03,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] regionserver.HRegion(8855): Flush requested on 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:57:03,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 54f94134ed484eb6760ede13ef02eff1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T07:57:03,295 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:03,295 WARN [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:05,289 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T07:57:08,293 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:08,294 WARN [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:08,294 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:08,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:08,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:08,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:08,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:08,297 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644613264 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644618285 2024-12-08T07:57:08,299 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43545:43545),(127.0.0.1/127.0.0.1:35201:35201)] 2024-12-08T07:57:08,299 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644613264 is not closed yet, will try archiving it next time 2024-12-08T07:57:08,299 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C37793%2C1733644558567:(num 1733644618285) roll requested 2024-12-08T07:57:08,300 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644628299 2024-12-08T07:57:08,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741842_1018 (size=1569) 2024-12-08T07:57:08,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741842_1018 (size=1569) 2024-12-08T07:57:08,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/77d4ed49b9394037af4a79ccf261c950 is 1080, key is row0015/info:/1733644600040/Put/seqid=0 2024-12-08T07:57:08,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741844_1020 (size=12509) 2024-12-08T07:57:08,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741844_1020 (size=12509) 2024-12-08T07:57:08,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/77d4ed49b9394037af4a79ccf261c950 2024-12-08T07:57:08,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/77d4ed49b9394037af4a79ccf261c950 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/77d4ed49b9394037af4a79ccf261c950 2024-12-08T07:57:08,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/77d4ed49b9394037af4a79ccf261c950, entries=7, sequenceid=31, filesize=12.2 K 2024-12-08T07:57:13,308 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:13,308 WARN [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:13,333 INFO [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:13,333 WARN [FSHLog-0-hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301-prefix:0106a245d0e8,37793,1733644558567 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33323,DS-82ec1916-54f0-417b-bc6d-58cccb8812eb,DISK], DatanodeInfoWithStorage[127.0.0.1:45537,DS-86434993-08ea-4ba3-aaf9-a4c37db91028,DISK]] 2024-12-08T07:57:13,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 54f94134ed484eb6760ede13ef02eff1 in 10045ms, sequenceid=31, compaction requested=true 2024-12-08T07:57:13,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 54f94134ed484eb6760ede13ef02eff1: 2024-12-08T07:57:13,334 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-08T07:57:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:57:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36 because midkey is the same as first or last row 2024-12-08T07:57:13,334 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,334 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,334 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644618285 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644628299 2024-12-08T07:57:13,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54f94134ed484eb6760ede13ef02eff1:info, priority=-2147483648, current under 
compaction store size is 1 2024-12-08T07:57:13,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741843_1019 (size=438) 2024-12-08T07:57:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741843_1019 (size=438) 2024-12-08T07:57:13,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T07:57:13,338 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T07:57:13,338 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644591000 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644591000 2024-12-08T07:57:13,339 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43545:43545),(127.0.0.1/127.0.0.1:35201:35201)] 2024-12-08T07:57:13,339 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C37793%2C1733644558567:(num 1733644628299) roll requested 2024-12-08T07:57:13,339 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644633339 2024-12-08T07:57:13,340 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644602450 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644602450 2024-12-08T07:57:13,343 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644613264 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644613264 2024-12-08T07:57:13,343 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T07:57:13,346 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.HStore(1541): 54f94134ed484eb6760ede13ef02eff1/info is initiating minor compaction (all files) 2024-12-08T07:57:13,347 INFO [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 54f94134ed484eb6760ede13ef02eff1/info in TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 
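
The split-policy and compaction-selection entries above can be tied together with a little arithmetic: the three flushed HFiles of 12509 bytes each sum to 37527 bytes (the "3 files of size 37527" selected for compaction), which is about 36.6 K and therefore over the 16.0 K sizeToCheck, yet the split is refused because the candidate midkey equals the store file's first or last row. The Java sketch below only reproduces that size check; the 16 K threshold and the file sizes come from the log, while the row keys and the reduction of the midkey test to a string comparison are assumptions made for illustration, not HBase's split-policy classes.

    // Reproduces the size arithmetic behind the split-policy lines above:
    // 3 x 12509 bytes = 37527 bytes (~36.6 K) exceeds the 16 K sizeToCheck,
    // but the split is still vetoed when the midkey matches the first or
    // last row. Simplified illustration only.
    public class SplitCheckSketch {
        public static void main(String[] args) {
            long[] storeFileSizes = {12509, 12509, 12509};  // HFile sizes from the log
            long sizeToCheck = 16 * 1024;                   // "sizeToCheck=16.0 K"

            long sumSize = 0;
            for (long s : storeFileSizes) {
                sumSize += s;
            }
            System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K%n",
                    sumSize / 1024.0, sizeToCheck / 1024.0);

            boolean bigEnough = sumSize > sizeToCheck;      // "Should split because region size is big enough"

            // Hypothetical keys standing in for the store file's first row, midkey and last row.
            String firstRow = "row0001", midKey = "row0001", lastRow = "row0021";
            boolean splittable = bigEnough
                    && !midKey.equals(firstRow) && !midKey.equals(lastRow);

            if (!splittable) {
                System.out.println("cannot split: midkey is the same as first or last row");
            }
        }
    }
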
2024-12-08T07:57:13,347 INFO [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/07cb2a3f74064a5b994b35ec8d88dea0, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/77d4ed49b9394037af4a79ccf261c950] into tmpdir=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp, totalSize=36.6 K 2024-12-08T07:57:13,350 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644618285 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644618285 2024-12-08T07:57:13,350 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] compactions.Compactor(225): Compacting c00040648dbd4a9aa349ae0d07a44f36, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733644570865 2024-12-08T07:57:13,351 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] compactions.Compactor(225): Compacting 07cb2a3f74064a5b994b35ec8d88dea0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733644584960 2024-12-08T07:57:13,353 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77d4ed49b9394037af4a79ccf261c950, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733644600040 2024-12-08T07:57:13,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,380 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,380 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,380 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644628299 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644633339 2024-12-08T07:57:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741845_1021 (size=93) 2024-12-08T07:57:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741845_1021 (size=93) 2024-12-08T07:57:13,384 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644628299 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs/0106a245d0e8%2C37793%2C1733644558567.1733644628299 2024-12-08T07:57:13,393 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35201:35201),(127.0.0.1/127.0.0.1:43545:43545)] 2024-12-08T07:57:13,393 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37793%2C1733644558567.1733644633393 2024-12-08T07:57:13,407 INFO [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54f94134ed484eb6760ede13ef02eff1#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T07:57:13,408 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/4e28cabe00f14e96b7e3b5685b42404c is 1080, key is row0001/info:/1733644570865/Put/seqid=0 2024-12-08T07:57:13,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,417 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,417 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:13,418 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644633339 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/WALs/0106a245d0e8,37793,1733644558567/0106a245d0e8%2C37793%2C1733644558567.1733644633393 2024-12-08T07:57:13,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741846_1022 (size=1258) 2024-12-08T07:57:13,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741846_1022 (size=1258) 2024-12-08T07:57:13,429 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35201:35201),(127.0.0.1/127.0.0.1:43545:43545)] 2024-12-08T07:57:13,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741848_1024 (size=27710) 2024-12-08T07:57:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741848_1024 (size=27710) 2024-12-08T07:57:13,451 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/4e28cabe00f14e96b7e3b5685b42404c as 
hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/4e28cabe00f14e96b7e3b5685b42404c 2024-12-08T07:57:13,470 INFO [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 54f94134ed484eb6760ede13ef02eff1/info of 54f94134ed484eb6760ede13ef02eff1 into 4e28cabe00f14e96b7e3b5685b42404c(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T07:57:13,470 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 54f94134ed484eb6760ede13ef02eff1: 2024-12-08T07:57:13,473 INFO [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1., storeName=54f94134ed484eb6760ede13ef02eff1/info, priority=13, startTime=1733644633335; duration=0sec 2024-12-08T07:57:13,473 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T07:57:13,473 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:57:13,473 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/4e28cabe00f14e96b7e3b5685b42404c because midkey is the same as first or last row 2024-12-08T07:57:13,474 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T07:57:13,474 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:57:13,474 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/4e28cabe00f14e96b7e3b5685b42404c because midkey is the same as first or last row 2024-12-08T07:57:13,474 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T07:57:13,474 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:57:13,475 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/4e28cabe00f14e96b7e3b5685b42404c because midkey is the same as first or last row 2024-12-08T07:57:13,475 DEBUG [RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T07:57:13,475 DEBUG 
[RS:0;0106a245d0e8:37793-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54f94134ed484eb6760ede13ef02eff1:info 2024-12-08T07:57:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37793 {}] regionserver.HRegion(8855): Flush requested on 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:57:25,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 54f94134ed484eb6760ede13ef02eff1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T07:57:25,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/60b97a0aa0ed4b88b31c2e22ec56aa80 is 1080, key is row0022/info:/1733644633395/Put/seqid=0 2024-12-08T07:57:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741849_1025 (size=12509) 2024-12-08T07:57:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741849_1025 (size=12509) 2024-12-08T07:57:25,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/60b97a0aa0ed4b88b31c2e22ec56aa80 2024-12-08T07:57:25,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/60b97a0aa0ed4b88b31c2e22ec56aa80 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/60b97a0aa0ed4b88b31c2e22ec56aa80 2024-12-08T07:57:25,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/60b97a0aa0ed4b88b31c2e22ec56aa80, entries=7, sequenceid=42, filesize=12.2 K 2024-12-08T07:57:25,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 54f94134ed484eb6760ede13ef02eff1 in 39ms, sequenceid=42, compaction requested=false 2024-12-08T07:57:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 54f94134ed484eb6760ede13ef02eff1: 2024-12-08T07:57:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-08T07:57:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:57:25,465 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/4e28cabe00f14e96b7e3b5685b42404c because midkey is the same as first or last 
row 2024-12-08T07:57:26,963 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T07:57:31,189 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 54f94134ed484eb6760ede13ef02eff1, had cached 0 bytes from a total of 40219 2024-12-08T07:57:33,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T07:57:33,446 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T07:57:33,447 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:57:33,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:33,455 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:33,455 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T07:57:33,455 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T07:57:33,455 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1496649153, stopped=false 2024-12-08T07:57:33,455 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,44675,1733644557832 2024-12-08T07:57:33,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:33,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:33,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:33,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:33,505 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:57:33,505 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T07:57:33,506 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:57:33,506 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:33,506 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:33,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:33,507 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,37793,1733644558567' ***** 2024-12-08T07:57:33,507 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T07:57:33,507 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T07:57:33,508 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T07:57:33,508 INFO [RS:0;0106a245d0e8:37793 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T07:57:33,508 INFO [RS:0;0106a245d0e8:37793 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T07:57:33,509 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(3091): Received CLOSE for 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:57:33,509 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,37793,1733644558567 2024-12-08T07:57:33,509 INFO [RS:0;0106a245d0e8:37793 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:57:33,510 INFO [RS:0;0106a245d0e8:37793 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:37793. 2024-12-08T07:57:33,510 DEBUG [RS:0;0106a245d0e8:37793 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:57:33,510 DEBUG [RS:0;0106a245d0e8:37793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:33,510 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 54f94134ed484eb6760ede13ef02eff1, disabling compactions & flushes 2024-12-08T07:57:33,510 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:57:33,510 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:57:33,510 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T07:57:33,510 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. after waiting 0 ms 2024-12-08T07:57:33,510 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T07:57:33,510 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:57:33,511 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T07:57:33,511 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T07:57:33,511 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 54f94134ed484eb6760ede13ef02eff1 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-08T07:57:33,512 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T07:57:33,512 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 54f94134ed484eb6760ede13ef02eff1=TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.} 2024-12-08T07:57:33,512 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:57:33,512 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:57:33,512 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:57:33,512 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:57:33,512 DEBUG [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 54f94134ed484eb6760ede13ef02eff1 2024-12-08T07:57:33,512 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:57:33,513 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-08T07:57:33,518 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/332322e229ea41ed8d6959396b3206ec is 1080, key is row0029/info:/1733644647430/Put/seqid=0 2024-12-08T07:57:33,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741850_1026 (size=8193) 2024-12-08T07:57:33,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741850_1026 (size=8193) 2024-12-08T07:57:33,528 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/332322e229ea41ed8d6959396b3206ec 2024-12-08T07:57:33,538 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/.tmp/info/332322e229ea41ed8d6959396b3206ec as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/332322e229ea41ed8d6959396b3206ec 2024-12-08T07:57:33,539 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/info/658a035b819a409ea32780a9978c6b25 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1./info:regioninfo/1733644561213/Put/seqid=0 2024-12-08T07:57:33,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741851_1027 (size=7016) 2024-12-08T07:57:33,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741851_1027 (size=7016) 2024-12-08T07:57:33,546 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/info/658a035b819a409ea32780a9978c6b25 2024-12-08T07:57:33,549 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/332322e229ea41ed8d6959396b3206ec, entries=3, sequenceid=48, filesize=8.0 K 2024-12-08T07:57:33,550 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 54f94134ed484eb6760ede13ef02eff1 in 39ms, sequenceid=48, compaction requested=true 2024-12-08T07:57:33,551 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/07cb2a3f74064a5b994b35ec8d88dea0, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/77d4ed49b9394037af4a79ccf261c950] to archive 2024-12-08T07:57:33,554 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T07:57:33,558 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/archive/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/c00040648dbd4a9aa349ae0d07a44f36 2024-12-08T07:57:33,560 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/07cb2a3f74064a5b994b35ec8d88dea0 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/archive/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/07cb2a3f74064a5b994b35ec8d88dea0 2024-12-08T07:57:33,562 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/77d4ed49b9394037af4a79ccf261c950 to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/archive/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/info/77d4ed49b9394037af4a79ccf261c950 2024-12-08T07:57:33,570 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/ns/c0649b297cc8419fab732f4184dc6d8b is 43, key is default/ns:d/1733644560553/Put/seqid=0 2024-12-08T07:57:33,572 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0106a245d0e8:44675 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-08T07:57:33,576 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c00040648dbd4a9aa349ae0d07a44f36=12509, 07cb2a3f74064a5b994b35ec8d88dea0=12509, 77d4ed49b9394037af4a79ccf261c950=12509] 2024-12-08T07:57:33,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741852_1028 (size=5153) 2024-12-08T07:57:33,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741852_1028 (size=5153) 2024-12-08T07:57:33,578 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/ns/c0649b297cc8419fab732f4184dc6d8b 2024-12-08T07:57:33,581 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/default/TestLogRolling-testSlowSyncLogRolling/54f94134ed484eb6760ede13ef02eff1/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-08T07:57:33,584 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:57:33,584 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 54f94134ed484eb6760ede13ef02eff1: Waiting for close lock at 1733644653510Running coprocessor pre-close hooks at 1733644653510Disabling compacts and flushes for region at 1733644653510Disabling writes for close at 1733644653510Obtaining lock to block concurrent updates at 1733644653511 (+1 ms)Preparing flush snapshotting stores in 54f94134ed484eb6760ede13ef02eff1 at 1733644653511Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733644653511Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 
at 1733644653513 (+2 ms)Flushing 54f94134ed484eb6760ede13ef02eff1/info: creating writer at 1733644653513Flushing 54f94134ed484eb6760ede13ef02eff1/info: appending metadata at 1733644653517 (+4 ms)Flushing 54f94134ed484eb6760ede13ef02eff1/info: closing flushed file at 1733644653517Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17ec4854: reopening flushed file at 1733644653536 (+19 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 54f94134ed484eb6760ede13ef02eff1 in 39ms, sequenceid=48, compaction requested=true at 1733644653551 (+15 ms)Writing region close event to WAL at 1733644653577 (+26 ms)Running coprocessor post-close hooks at 1733644653582 (+5 ms)Closed at 1733644653584 (+2 ms) 2024-12-08T07:57:33,585 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733644560765.54f94134ed484eb6760ede13ef02eff1. 2024-12-08T07:57:33,599 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/table/7fc253dfce0142419f8d9b113fee7ff5 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733644561230/Put/seqid=0 2024-12-08T07:57:33,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741853_1029 (size=5396) 2024-12-08T07:57:33,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741853_1029 (size=5396) 2024-12-08T07:57:33,605 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/table/7fc253dfce0142419f8d9b113fee7ff5 2024-12-08T07:57:33,613 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/info/658a035b819a409ea32780a9978c6b25 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/info/658a035b819a409ea32780a9978c6b25 2024-12-08T07:57:33,624 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/info/658a035b819a409ea32780a9978c6b25, entries=10, sequenceid=11, filesize=6.9 K 2024-12-08T07:57:33,625 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/ns/c0649b297cc8419fab732f4184dc6d8b as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/ns/c0649b297cc8419fab732f4184dc6d8b 2024-12-08T07:57:33,632 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/ns/c0649b297cc8419fab732f4184dc6d8b, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T07:57:33,633 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/.tmp/table/7fc253dfce0142419f8d9b113fee7ff5 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/table/7fc253dfce0142419f8d9b113fee7ff5 2024-12-08T07:57:33,640 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/table/7fc253dfce0142419f8d9b113fee7ff5, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T07:57:33,642 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false 2024-12-08T07:57:33,647 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T07:57:33,648 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:57:33,648 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:57:33,648 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644653512Running coprocessor pre-close hooks at 1733644653512Disabling compacts and flushes for region at 1733644653512Disabling writes for close at 1733644653512Obtaining lock to block concurrent updates at 1733644653513 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733644653513Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733644653513Flushing stores of hbase:meta,,1.1588230740 at 1733644653514 (+1 ms)Flushing 1588230740/info: creating writer at 1733644653514Flushing 1588230740/info: appending metadata at 1733644653538 (+24 ms)Flushing 1588230740/info: closing flushed file at 1733644653538Flushing 1588230740/ns: creating writer at 1733644653555 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733644653569 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733644653569Flushing 1588230740/table: creating writer at 1733644653585 (+16 ms)Flushing 1588230740/table: appending metadata at 1733644653598 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733644653598Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2068d6ee: reopening flushed file at 1733644653612 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74e84fa0: reopening flushed file at 1733644653624 (+12 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49a5adb2: reopening flushed file at 1733644653632 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false at 1733644653642 (+10 ms)Writing region close event to WAL at 1733644653643 (+1 ms)Running coprocessor post-close hooks at 1733644653647 (+4 ms)Closed at 1733644653648 (+1 ms) 2024-12-08T07:57:33,648 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T07:57:33,713 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,37793,1733644558567; all regions closed. 2024-12-08T07:57:33,716 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,716 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,716 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,717 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741834_1010 (size=3066) 2024-12-08T07:57:33,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741834_1010 (size=3066) 2024-12-08T07:57:33,726 DEBUG [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs 2024-12-08T07:57:33,726 INFO [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C37793%2C1733644558567.meta:.meta(num 1733644560338) 2024-12-08T07:57:33,726 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,727 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,727 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,727 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741847_1023 (size=12695) 2024-12-08T07:57:33,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741847_1023 (size=12695) 2024-12-08T07:57:33,733 DEBUG [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/oldWALs 2024-12-08T07:57:33,733 INFO [RS:0;0106a245d0e8:37793 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C37793%2C1733644558567:(num 1733644633393) 2024-12-08T07:57:33,733 DEBUG [RS:0;0106a245d0e8:37793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:33,733 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:57:33,733 INFO [RS:0;0106a245d0e8:37793 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:57:33,733 INFO [RS:0;0106a245d0e8:37793 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T07:57:33,734 INFO [RS:0;0106a245d0e8:37793 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:57:33,734 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T07:57:33,734 INFO [RS:0;0106a245d0e8:37793 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37793 2024-12-08T07:57:33,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,37793,1733644558567 2024-12-08T07:57:33,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:57:33,747 INFO [RS:0;0106a245d0e8:37793 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:57:33,748 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,37793,1733644558567] 2024-12-08T07:57:33,768 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,37793,1733644558567 already deleted, retry=false 2024-12-08T07:57:33,768 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,37793,1733644558567 expired; onlineServers=0 2024-12-08T07:57:33,769 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,44675,1733644557832' ***** 2024-12-08T07:57:33,769 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T07:57:33,769 INFO [M:0;0106a245d0e8:44675 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:57:33,769 INFO [M:0;0106a245d0e8:44675 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:57:33,769 DEBUG [M:0;0106a245d0e8:44675 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T07:57:33,770 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T07:57:33,770 DEBUG [M:0;0106a245d0e8:44675 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T07:57:33,770 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644559613 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644559613,5,FailOnTimeoutGroup] 2024-12-08T07:57:33,770 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644559612 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644559612,5,FailOnTimeoutGroup] 2024-12-08T07:57:33,770 INFO [M:0;0106a245d0e8:44675 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T07:57:33,771 INFO [M:0;0106a245d0e8:44675 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:57:33,771 DEBUG [M:0;0106a245d0e8:44675 {}] master.HMaster(1795): Stopping service threads 2024-12-08T07:57:33,771 INFO [M:0;0106a245d0e8:44675 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T07:57:33,771 INFO [M:0;0106a245d0e8:44675 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:57:33,772 INFO [M:0;0106a245d0e8:44675 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T07:57:33,773 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T07:57:33,777 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:57:33,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T07:57:33,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:33,779 DEBUG [M:0;0106a245d0e8:44675 {}] zookeeper.ZKUtil(347): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T07:57:33,779 WARN [M:0;0106a245d0e8:44675 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T07:57:33,780 INFO [M:0;0106a245d0e8:44675 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/.lastflushedseqids 2024-12-08T07:57:33,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741854_1030 (size=130) 2024-12-08T07:57:33,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741854_1030 (size=130) 2024-12-08T07:57:33,794 INFO [M:0;0106a245d0e8:44675 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T07:57:33,795 INFO [M:0;0106a245d0e8:44675 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-12-08T07:57:33,795 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:57:33,795 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:33,795 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:33,795 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:57:33,795 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:33,796 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-08T07:57:33,821 DEBUG [M:0;0106a245d0e8:44675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed1ea1340686411880a51bba94cbe04a is 82, key is hbase:meta,,1/info:regioninfo/1733644560407/Put/seqid=0 2024-12-08T07:57:33,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741855_1031 (size=5672) 2024-12-08T07:57:33,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741855_1031 (size=5672) 2024-12-08T07:57:33,828 INFO [M:0;0106a245d0e8:44675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed1ea1340686411880a51bba94cbe04a 2024-12-08T07:57:33,850 DEBUG [M:0;0106a245d0e8:44675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed820c5a275e4673bfb895b550ed037f is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733644561237/Put/seqid=0 2024-12-08T07:57:33,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741856_1032 (size=6247) 2024-12-08T07:57:33,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741856_1032 (size=6247) 2024-12-08T07:57:33,856 INFO [M:0;0106a245d0e8:44675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed820c5a275e4673bfb895b550ed037f 2024-12-08T07:57:33,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:33,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37793-0x100046ca1990001, 
quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:33,858 INFO [RS:0;0106a245d0e8:37793 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:57:33,859 INFO [RS:0;0106a245d0e8:37793 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,37793,1733644558567; zookeeper connection closed. 2024-12-08T07:57:33,859 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c2d392f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c2d392f 2024-12-08T07:57:33,859 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T07:57:33,863 INFO [M:0;0106a245d0e8:44675 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed820c5a275e4673bfb895b550ed037f 2024-12-08T07:57:33,877 DEBUG [M:0;0106a245d0e8:44675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f9c8f7efd13408885b84366e09bc701 is 69, key is 0106a245d0e8,37793,1733644558567/rs:state/1733644559698/Put/seqid=0 2024-12-08T07:57:33,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741857_1033 (size=5156) 2024-12-08T07:57:33,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741857_1033 (size=5156) 2024-12-08T07:57:33,884 INFO [M:0;0106a245d0e8:44675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f9c8f7efd13408885b84366e09bc701 2024-12-08T07:57:33,907 DEBUG [M:0;0106a245d0e8:44675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3df06afb72224e37b4caf899b76b80a8 is 52, key is load_balancer_on/state:d/1733644560749/Put/seqid=0 2024-12-08T07:57:33,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741858_1034 (size=5056) 2024-12-08T07:57:33,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741858_1034 (size=5056) 2024-12-08T07:57:33,913 INFO [M:0;0106a245d0e8:44675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3df06afb72224e37b4caf899b76b80a8 2024-12-08T07:57:33,920 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed1ea1340686411880a51bba94cbe04a as 
hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed1ea1340686411880a51bba94cbe04a 2024-12-08T07:57:33,927 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed1ea1340686411880a51bba94cbe04a, entries=8, sequenceid=59, filesize=5.5 K 2024-12-08T07:57:33,929 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed820c5a275e4673bfb895b550ed037f as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ed820c5a275e4673bfb895b550ed037f 2024-12-08T07:57:33,935 INFO [M:0;0106a245d0e8:44675 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed820c5a275e4673bfb895b550ed037f 2024-12-08T07:57:33,935 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ed820c5a275e4673bfb895b550ed037f, entries=6, sequenceid=59, filesize=6.1 K 2024-12-08T07:57:33,936 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f9c8f7efd13408885b84366e09bc701 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1f9c8f7efd13408885b84366e09bc701 2024-12-08T07:57:33,943 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1f9c8f7efd13408885b84366e09bc701, entries=1, sequenceid=59, filesize=5.0 K 2024-12-08T07:57:33,944 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3df06afb72224e37b4caf899b76b80a8 as hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3df06afb72224e37b4caf899b76b80a8 2024-12-08T07:57:33,951 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3df06afb72224e37b4caf899b76b80a8, entries=1, sequenceid=59, filesize=4.9 K 2024-12-08T07:57:33,952 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=59, compaction requested=false 2024-12-08T07:57:33,954 INFO [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:57:33,954 DEBUG [M:0;0106a245d0e8:44675 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644653795Disabling compacts and flushes for region at 1733644653795Disabling writes for close at 1733644653795Obtaining lock to block concurrent updates at 1733644653796 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644653796Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733644653796Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733644653797 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644653797Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644653820 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644653820Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644653835 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644653849 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644653850 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644653863 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644653877 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644653877Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644653891 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644653906 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644653906Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@595a5292: reopening flushed file at 1733644653919 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27fd5feb: reopening flushed file at 1733644653927 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46cc08f6: reopening flushed file at 1733644653935 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7add183e: reopening flushed file at 1733644653943 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=59, compaction requested=false at 1733644653952 (+9 ms)Writing region close event to WAL at 1733644653954 (+2 ms)Closed at 1733644653954 2024-12-08T07:57:33,955 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,955 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,955 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,955 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,955 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:33,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741830_1006 (size=27973) 2024-12-08T07:57:33,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741830_1006 (size=27973) 2024-12-08T07:57:33,958 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T07:57:33,958 INFO [M:0;0106a245d0e8:44675 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T07:57:33,958 INFO [M:0;0106a245d0e8:44675 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44675 2024-12-08T07:57:33,959 INFO [M:0;0106a245d0e8:44675 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:57:34,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:34,100 INFO [M:0;0106a245d0e8:44675 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:57:34,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44675-0x100046ca1990000, quorum=127.0.0.1:55683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:34,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:34,107 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:34,107 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:34,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:34,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:34,110 WARN [BP-1744500940-172.17.0.2-1733644554138 heartbeating to localhost/127.0.0.1:35577 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:34,110 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:57:34,111 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:34,111 WARN [BP-1744500940-172.17.0.2-1733644554138 heartbeating to localhost/127.0.0.1:35577 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1744500940-172.17.0.2-1733644554138 (Datanode Uuid 2aa0b056-fb3f-4c5b-9f33-7bfd6e8495c0) service to localhost/127.0.0.1:35577 2024-12-08T07:57:34,112 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data3/current/BP-1744500940-172.17.0.2-1733644554138 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:34,112 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data4/current/BP-1744500940-172.17.0.2-1733644554138 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:34,113 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:34,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:34,115 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:34,115 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:34,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:34,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:34,117 WARN [BP-1744500940-172.17.0.2-1733644554138 heartbeating to localhost/127.0.0.1:35577 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:34,117 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:57:34,117 WARN [BP-1744500940-172.17.0.2-1733644554138 heartbeating to localhost/127.0.0.1:35577 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1744500940-172.17.0.2-1733644554138 (Datanode Uuid b0265732-bb00-45be-92ad-120975a0f459) service to localhost/127.0.0.1:35577 2024-12-08T07:57:34,117 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:34,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data1/current/BP-1744500940-172.17.0.2-1733644554138 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:34,118 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/cluster_58007a53-9667-2455-9e07-b190100b304a/data/data2/current/BP-1744500940-172.17.0.2-1733644554138 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:34,118 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:34,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:57:34,128 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:34,128 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:34,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:34,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:34,136 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T07:57:34,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T07:57:34,173 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35577 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35577 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35577 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35577 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/0106a245d0e8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35577 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@609afd55 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35577 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35577 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35577 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/0106a245d0e8:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/0106a245d0e8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=181 (was 185), ProcessCount=11 (was 11), AvailableMemoryMB=9365 (was 9570) 2024-12-08T07:57:34,179 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=181, ProcessCount=11, AvailableMemoryMB=9365 2024-12-08T07:57:34,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.log.dir so I do NOT create it in target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/120a9531-a00a-a282-0d98-8498b0cc8931/hadoop.tmp.dir so I do NOT create it in target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f, deleteOnExit=true 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/test.cache.data in system properties and HBase conf 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir in system properties and HBase conf 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T07:57:34,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T07:57:34,180 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T07:57:34,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/nfs.dump.dir in system properties and HBase conf 2024-12-08T07:57:34,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/java.io.tmpdir in system properties and HBase conf 2024-12-08T07:57:34,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:57:34,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T07:57:34,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T07:57:34,193 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:57:34,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:34,672 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:34,674 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:34,674 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:34,674 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T07:57:34,674 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:34,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65506a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:34,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75cbfab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:34,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@493d1d34{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/java.io.tmpdir/jetty-localhost-45467-hadoop-hdfs-3_4_1-tests_jar-_-any-7300769968899548883/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:57:34,768 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a249094{HTTP/1.1, (http/1.1)}{localhost:45467} 2024-12-08T07:57:34,768 INFO [Time-limited test {}] server.Server(415): Started @102336ms 2024-12-08T07:57:34,779 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:57:35,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:35,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:35,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:35,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:35,044 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T07:57:35,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8914e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:35,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aa9354f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:35,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52b07bdb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/java.io.tmpdir/jetty-localhost-46623-hadoop-hdfs-3_4_1-tests_jar-_-any-3520247233038173697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:35,136 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70121b28{HTTP/1.1, (http/1.1)}{localhost:46623} 2024-12-08T07:57:35,136 INFO [Time-limited test {}] server.Server(415): Started @102704ms 2024-12-08T07:57:35,137 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:35,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:35,174 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:35,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:35,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:35,175 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:57:35,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b915b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:35,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc8c098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:35,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f5c23ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/java.io.tmpdir/jetty-localhost-38541-hadoop-hdfs-3_4_1-tests_jar-_-any-7268060234565845198/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:35,267 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@716c7b87{HTTP/1.1, (http/1.1)}{localhost:38541} 2024-12-08T07:57:35,267 INFO [Time-limited test {}] server.Server(415): Started @102835ms 2024-12-08T07:57:35,269 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:36,314 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data1/current/BP-1534295543-172.17.0.2-1733644654204/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:36,314 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data2/current/BP-1534295543-172.17.0.2-1733644654204/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:36,337 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe115a3c18a7c4ca with lease ID 0x4f8d6ce80dc29aae: Processing first storage report for DS-da211c48-3b7b-4716-a53a-1e609efe5b8a from datanode DatanodeRegistration(127.0.0.1:35633, datanodeUuid=aa9f56e8-f9cd-462f-ac75-df8769b6dc73, infoPort=36733, infoSecurePort=0, ipcPort=34299, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204) 2024-12-08T07:57:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe115a3c18a7c4ca with lease ID 0x4f8d6ce80dc29aae: from storage DS-da211c48-3b7b-4716-a53a-1e609efe5b8a node DatanodeRegistration(127.0.0.1:35633, datanodeUuid=aa9f56e8-f9cd-462f-ac75-df8769b6dc73, infoPort=36733, infoSecurePort=0, ipcPort=34299, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe115a3c18a7c4ca with lease ID 0x4f8d6ce80dc29aae: Processing first storage report for DS-93bf8d82-07a6-470f-a62d-aba4f92f50c9 from datanode DatanodeRegistration(127.0.0.1:35633, datanodeUuid=aa9f56e8-f9cd-462f-ac75-df8769b6dc73, infoPort=36733, infoSecurePort=0, ipcPort=34299, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204) 2024-12-08T07:57:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe115a3c18a7c4ca with lease ID 0x4f8d6ce80dc29aae: from storage DS-93bf8d82-07a6-470f-a62d-aba4f92f50c9 node DatanodeRegistration(127.0.0.1:35633, datanodeUuid=aa9f56e8-f9cd-462f-ac75-df8769b6dc73, infoPort=36733, infoSecurePort=0, ipcPort=34299, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:36,490 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data4/current/BP-1534295543-172.17.0.2-1733644654204/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:36,490 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data3/current/BP-1534295543-172.17.0.2-1733644654204/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:36,513 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f42db55a5952f1f with lease ID 0x4f8d6ce80dc29aaf: Processing first storage report for DS-c47bc899-a798-4426-bd7d-1cfdac7bd2af from datanode DatanodeRegistration(127.0.0.1:46235, datanodeUuid=b291005b-25cd-4af2-8e67-6c51982e0316, infoPort=39289, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204) 2024-12-08T07:57:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f42db55a5952f1f with lease ID 0x4f8d6ce80dc29aaf: from storage DS-c47bc899-a798-4426-bd7d-1cfdac7bd2af node DatanodeRegistration(127.0.0.1:46235, datanodeUuid=b291005b-25cd-4af2-8e67-6c51982e0316, infoPort=39289, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f42db55a5952f1f with lease ID 0x4f8d6ce80dc29aaf: Processing first storage report for DS-88138c0c-bdfd-42ec-9597-4ce8e3fcb9fc from datanode DatanodeRegistration(127.0.0.1:46235, datanodeUuid=b291005b-25cd-4af2-8e67-6c51982e0316, infoPort=39289, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204) 2024-12-08T07:57:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f42db55a5952f1f with lease ID 0x4f8d6ce80dc29aaf: from storage DS-88138c0c-bdfd-42ec-9597-4ce8e3fcb9fc node DatanodeRegistration(127.0.0.1:46235, datanodeUuid=b291005b-25cd-4af2-8e67-6c51982e0316, infoPort=39289, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=2021750212;c=1733644654204), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:36,608 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776 2024-12-08T07:57:36,613 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/zookeeper_0, clientPort=49281, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T07:57:36,614 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49281 2024-12-08T07:57:36,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,615 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:57:36,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:57:36,626 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37 with version=8 2024-12-08T07:57:36,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging 2024-12-08T07:57:36,629 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:57:36,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:36,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:36,630 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:57:36,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:36,630 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:57:36,630 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T07:57:36,630 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:57:36,631 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41589 2024-12-08T07:57:36,632 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41589 connecting to ZooKeeper ensemble=127.0.0.1:49281 2024-12-08T07:57:36,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415890x0, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:57:36,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41589-0x100046e26ab0000 connected 2024-12-08T07:57:36,800 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,804 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:36,810 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37, hbase.cluster.distributed=false 2024-12-08T07:57:36,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:57:36,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41589 2024-12-08T07:57:36,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41589 2024-12-08T07:57:36,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41589 2024-12-08T07:57:36,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41589 2024-12-08T07:57:36,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41589 2024-12-08T07:57:36,827 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T07:57:36,828 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:57:36,829 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36933 2024-12-08T07:57:36,830 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36933 connecting to ZooKeeper ensemble=127.0.0.1:49281 2024-12-08T07:57:36,830 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,832 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369330x0, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:57:36,842 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36933-0x100046e26ab0001 connected 2024-12-08T07:57:36,842 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:36,842 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T07:57:36,843 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T07:57:36,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T07:57:36,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:57:36,846 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36933 2024-12-08T07:57:36,846 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36933 2024-12-08T07:57:36,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36933 2024-12-08T07:57:36,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36933 2024-12-08T07:57:36,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36933 2024-12-08T07:57:36,860 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:41589 2024-12-08T07:57:36,861 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,41589,1733644656629 2024-12-08T07:57:36,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:36,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:36,874 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0106a245d0e8,41589,1733644656629 2024-12-08T07:57:36,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T07:57:36,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:36,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:36,885 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T07:57:36,885 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,41589,1733644656629 from backup master directory 2024-12-08T07:57:36,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0106a245d0e8,41589,1733644656629 2024-12-08T07:57:36,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:36,894 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T07:57:36,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:36,894 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,41589,1733644656629 2024-12-08T07:57:36,902 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/hbase.id] with ID: bd47c260-1c7a-451e-a542-ac5424636603 2024-12-08T07:57:36,902 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/.tmp/hbase.id 2024-12-08T07:57:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:57:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:57:36,921 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/.tmp/hbase.id]:[hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/hbase.id] 2024-12-08T07:57:36,938 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:36,939 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T07:57:36,941 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-08T07:57:36,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:36,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:57:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:57:36,960 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:57:36,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T07:57:36,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:57:36,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:57:36,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:57:36,976 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store 2024-12-08T07:57:36,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:57:36,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:57:37,388 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:37,388 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:57:37,388 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:37,388 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:37,388 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:57:37,388 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:37,388 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:57:37,389 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644657388Disabling compacts and flushes for region at 1733644657388Disabling writes for close at 1733644657388Writing region close event to WAL at 1733644657388Closed at 1733644657388 2024-12-08T07:57:37,390 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/.initializing 2024-12-08T07:57:37,390 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/WALs/0106a245d0e8,41589,1733644656629 2024-12-08T07:57:37,394 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C41589%2C1733644656629, suffix=, logDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/WALs/0106a245d0e8,41589,1733644656629, archiveDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/oldWALs, maxLogs=10 2024-12-08T07:57:37,394 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C41589%2C1733644656629.1733644657394 2024-12-08T07:57:37,401 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/WALs/0106a245d0e8,41589,1733644656629/0106a245d0e8%2C41589%2C1733644656629.1733644657394 2024-12-08T07:57:37,402 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39289:39289),(127.0.0.1/127.0.0.1:36733:36733)] 2024-12-08T07:57:37,403 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:57:37,403 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:37,404 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,404 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,408 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T07:57:37,410 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:37,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:37,411 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,412 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T07:57:37,412 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:37,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:37,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T07:57:37,416 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:37,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:37,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T07:57:37,418 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:37,419 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:37,419 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,420 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,420 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,422 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,422 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,423 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T07:57:37,424 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:37,427 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:57:37,427 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755104, jitterRate=-0.039836570620536804}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T07:57:37,428 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644657404Initializing all the Stores at 1733644657405 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644657405Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644657407 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644657407Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644657407Cleaning up temporary data from old regions at 1733644657422 (+15 ms)Region opened successfully at 1733644657428 (+6 ms) 2024-12-08T07:57:37,429 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T07:57:37,432 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aad27d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:57:37,433 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T07:57:37,434 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T07:57:37,434 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T07:57:37,434 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T07:57:37,434 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T07:57:37,435 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T07:57:37,435 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T07:57:37,437 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T07:57:37,438 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T07:57:37,483 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T07:57:37,484 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T07:57:37,485 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T07:57:37,494 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T07:57:37,495 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T07:57:37,496 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T07:57:37,505 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T07:57:37,506 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T07:57:37,515 DEBUG 
[master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T07:57:37,518 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T07:57:37,526 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T07:57:37,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:37,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:37,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:37,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:37,537 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,41589,1733644656629, sessionid=0x100046e26ab0000, setting cluster-up flag (Was=false) 2024-12-08T07:57:37,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:37,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:37,589 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T07:57:37,592 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,41589,1733644656629 2024-12-08T07:57:37,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:37,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:37,642 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T07:57:37,646 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,41589,1733644656629 2024-12-08T07:57:37,648 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T07:57:37,651 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T07:57:37,652 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T07:57:37,652 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T07:57:37,652 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,41589,1733644656629 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T07:57:37,652 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(746): ClusterId : bd47c260-1c7a-451e-a542-ac5424636603 2024-12-08T07:57:37,653 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T07:57:37,663 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:37,664 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized
2024-12-08T07:57:37,664 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2
2024-12-08T07:57:37,664 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,666 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644687666
2024-12-08T07:57:37,666 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-08T07:57:37,666 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-08T07:57:37,666 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-08T07:57:37,667 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-08T07:57:37,667 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-08T07:57:37,667 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-08T07:57:37,667 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,668 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-08T07:57:37,668 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-08T07:57:37,668 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-08T07:57:37,668 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-12-08T07:57:37,668 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-08T07:57:37,669 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-08T07:57:37,669 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-08T07:57:37,670 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644657669,5,FailOnTimeoutGroup]
2024-12-08T07:57:37,670 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644657670,5,FailOnTimeoutGroup]
2024-12-08T07:57:37,670 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,671 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-08T07:57:37,671 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,671 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:37,671 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,671 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-08T07:57:37,674 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-08T07:57:37,675 DEBUG [RS:0;0106a245d0e8:36933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c020847, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0
2024-12-08T07:57:37,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741831_1007 (size=1321)
2024-12-08T07:57:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741831_1007 (size=1321)
2024-12-08T07:57:37,684 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-12-08T07:57:37,684 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37
2024-12-08T07:57:37,693 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:36933
2024-12-08T07:57:37,693 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-08T07:57:37,693 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-08T07:57:37,693 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-08T07:57:37,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741832_1008 (size=32)
2024-12-08T07:57:37,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741832_1008 (size=32)
2024-12-08T07:57:37,694 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,41589,1733644656629 with port=36933, startcode=1733644656827
2024-12-08T07:57:37,694 DEBUG [RS:0;0106a245d0e8:36933 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T07:57:37,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T07:57:37,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-08T07:57:37,702 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47231, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T07:57:37,703 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-08T07:57:37,703 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41589 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,703 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:37,703 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41589 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:37,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-08T07:57:37,706 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37
2024-12-08T07:57:37,706 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36351
2024-12-08T07:57:37,706 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-08T07:57:37,709 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-08T07:57:37,709 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:37,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:37,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-08T07:57:37,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T07:57:37,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-08T07:57:37,716 DEBUG [RS:0;0106a245d0e8:36933 {}] zookeeper.ZKUtil(111): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,717 WARN [RS:0;0106a245d0e8:36933 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T07:57:37,717 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:37,717 INFO [RS:0;0106a245d0e8:36933 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T07:57:37,717 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/WALs/0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,717 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,36933,1733644656827]
2024-12-08T07:57:37,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:37,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-08T07:57:37,720 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-08T07:57:37,721 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:37,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:37,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-08T07:57:37,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740
2024-12-08T07:57:37,724 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T07:57:37,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740
2024-12-08T07:57:37,726 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T07:57:37,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-08T07:57:37,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-08T07:57:37,727 INFO [RS:0;0106a245d0e8:36933 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T07:57:37,727 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,728 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-08T07:57:37,729 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-08T07:57:37,730 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-08T07:57:37,730 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,731 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,731 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,731 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-08T07:57:37,731 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,731 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1
2024-12-08T07:57:37,732 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3
2024-12-08T07:57:37,733 DEBUG [RS:0;0106a245d0e8:36933 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3
2024-12-08T07:57:37,740 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,740 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,740 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,740 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,740 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,740 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T07:57:37,740 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,36933,1733644656827-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T07:57:37,741 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691095, jitterRate=-0.12122738361358643}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-08T07:57:37,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644657695Initializing all the Stores at 1733644657696 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644657696Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644657700 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644657700Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644657700Cleaning up temporary data from old regions at 1733644657726 (+26 ms)Region opened successfully at 1733644657742 (+16 ms)
2024-12-08T07:57:37,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-08T07:57:37,742 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-08T07:57:37,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-08T07:57:37,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-08T07:57:37,742 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-08T07:57:37,745 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-08T07:57:37,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644657742Disabling compacts and flushes for region at 1733644657742Disabling writes for close at 1733644657742Writing region close event to WAL at 1733644657745 (+3 ms)Closed at 1733644657745
2024-12-08T07:57:37,747 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-08T07:57:37,747 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-12-08T07:57:37,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-08T07:57:37,749 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-08T07:57:37,750 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-08T07:57:37,758 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T07:57:37,758 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,36933,1733644656827-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,758 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,758 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.Replication(171): 0106a245d0e8,36933,1733644656827 started
2024-12-08T07:57:37,773 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:37,773 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,36933,1733644656827, RpcServer on 0106a245d0e8/172.17.0.2:36933, sessionid=0x100046e26ab0001
2024-12-08T07:57:37,773 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T07:57:37,773 DEBUG [RS:0;0106a245d0e8:36933 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,773 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,36933,1733644656827'
2024-12-08T07:57:37,773 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T07:57:37,774 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T07:57:37,775 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T07:57:37,775 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T07:57:37,775 DEBUG [RS:0;0106a245d0e8:36933 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,775 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,36933,1733644656827'
2024-12-08T07:57:37,775 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T07:57:37,776 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T07:57:37,776 DEBUG [RS:0;0106a245d0e8:36933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T07:57:37,776 INFO [RS:0;0106a245d0e8:36933 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T07:57:37,776 INFO [RS:0;0106a245d0e8:36933 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T07:57:37,880 INFO [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C36933%2C1733644656827, suffix=, logDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/WALs/0106a245d0e8,36933,1733644656827, archiveDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/oldWALs, maxLogs=32
2024-12-08T07:57:37,883 INFO [RS:0;0106a245d0e8:36933 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C36933%2C1733644656827.1733644657882
2024-12-08T07:57:37,892 INFO [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/WALs/0106a245d0e8,36933,1733644656827/0106a245d0e8%2C36933%2C1733644656827.1733644657882
2024-12-08T07:57:37,894 DEBUG [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36733:36733),(127.0.0.1/127.0.0.1:39289:39289)]
2024-12-08T07:57:37,901 DEBUG [0106a245d0e8:41589 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-08T07:57:37,902 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,36933,1733644656827
2024-12-08T07:57:37,904 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,36933,1733644656827, state=OPENING
2024-12-08T07:57:37,915 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-08T07:57:37,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T07:57:37,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T07:57:37,926 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T07:57:37,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T07:57:37,926 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-08T07:57:37,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,36933,1733644656827}]
2024-12-08T07:57:38,080 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T07:57:38,084 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42755, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T07:57:38,090 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-08T07:57:38,091 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T07:57:38,094 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C36933%2C1733644656827.meta, suffix=.meta, logDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/WALs/0106a245d0e8,36933,1733644656827, archiveDir=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/oldWALs, maxLogs=32
2024-12-08T07:57:38,096 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C36933%2C1733644656827.meta.1733644658096.meta
2024-12-08T07:57:38,104 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/WALs/0106a245d0e8,36933,1733644656827/0106a245d0e8%2C36933%2C1733644656827.meta.1733644658096.meta
2024-12-08T07:57:38,106 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36733:36733),(127.0.0.1/127.0.0.1:39289:39289)]
2024-12-08T07:57:38,106 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-08T07:57:38,107 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-08T07:57:38,107 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-08T07:57:38,107 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-08T07:57:38,107 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-08T07:57:38,107 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T07:57:38,107 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-08T07:57:38,108 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-08T07:57:38,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-08T07:57:38,111 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-08T07:57:38,111 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:38,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:38,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-08T07:57:38,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-08T07:57:38,113 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:38,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:38,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-08T07:57:38,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-08T07:57:38,114 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:38,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:38,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-08T07:57:38,116 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-08T07:57:38,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T07:57:38,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T07:57:38,117 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-08T07:57:38,118 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740
2024-12-08T07:57:38,120 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740
2024-12-08T07:57:38,122 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-08T07:57:38,122 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-08T07:57:38,123 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-08T07:57:38,125 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-08T07:57:38,127 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736980, jitterRate=-0.06288161873817444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-08T07:57:38,127 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-08T07:57:38,128 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644658108Writing region info on filesystem at 1733644658108Initializing all the Stores at 1733644658109 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644658109Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644658109Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644658109Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644658109Cleaning up temporary data from old regions at 1733644658122 (+13 ms)Running coprocessor post-open hooks at 1733644658127 (+5 ms)Region opened successfully at 1733644658128 (+1 ms)
2024-12-08T07:57:38,129 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644658080
2024-12-08T07:57:38,133 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-08T07:57:38,133 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-08T07:57:38,134 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,36933,1733644656827
2024-12-08T07:57:38,136 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,36933,1733644656827, state=OPEN
2024-12-08T07:57:38,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-12-08T07:57:38,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T07:57:38,174 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T07:57:38,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T07:57:38,174 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T07:57:38,174 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,36933,1733644656827
2024-12-08T07:57:38,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-12-08T07:57:38,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,36933,1733644656827 in 247 msec
2024-12-08T07:57:38,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-12-08T07:57:38,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 431 msec
2024-12-08T07:57:38,184 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-08T07:57:38,184 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-12-08T07:57:38,185 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-08T07:57:38,186 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,36933,1733644656827, seqNum=-1]
2024-12-08T07:57:38,186 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T07:57:38,188 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59557, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T07:57:38,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 544 msec
2024-12-08T07:57:38,196 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644658196, completionTime=-1
2024-12-08T07:57:38,196 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-08T07:57:38,196 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644718199
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733644778199
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,41589,1733644656629-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,41589,1733644656629-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,41589,1733644656629-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T07:57:38,199 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:41589, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:38,200 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:38,200 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:38,202 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T07:57:38,204 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.310sec 2024-12-08T07:57:38,204 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T07:57:38,205 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T07:57:38,205 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T07:57:38,205 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T07:57:38,205 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T07:57:38,205 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,41589,1733644656629-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:57:38,205 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,41589,1733644656629-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T07:57:38,208 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T07:57:38,208 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T07:57:38,208 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,41589,1733644656629-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T07:57:38,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9379ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:57:38,252 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,41589,-1 for getting cluster id 2024-12-08T07:57:38,253 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T07:57:38,255 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bd47c260-1c7a-451e-a542-ac5424636603' 2024-12-08T07:57:38,256 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T07:57:38,256 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bd47c260-1c7a-451e-a542-ac5424636603" 2024-12-08T07:57:38,257 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38887dbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:57:38,257 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,41589,-1] 2024-12-08T07:57:38,257 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T07:57:38,258 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:38,261 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T07:57:38,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:57:38,263 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:57:38,265 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,36933,1733644656827, seqNum=-1] 2024-12-08T07:57:38,265 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:57:38,267 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:57:38,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0106a245d0e8,41589,1733644656629 2024-12-08T07:57:38,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:38,272 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T07:57:38,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T07:57:38,273 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T07:57:38,273 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:57:38,273 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:38,273 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:38,273 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T07:57:38,273 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T07:57:38,273 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1434811321, stopped=false 2024-12-08T07:57:38,273 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,41589,1733644656629 2024-12-08T07:57:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:38,294 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:57:38,294 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
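The call stack above shows where this close originates: AbstractTestLogRolling.tearDown() calls shutdownMiniCluster(), which closes the shared test connection via Guava's Closeables.close() before tearing the cluster down. A minimal sketch of that setUp/tearDown pattern, written against the 2.x-era HBaseTestingUtility name (this 4.0.0-alpha log uses HBaseTestingUtil, but the lifecycle calls are the same):

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtility testUtil = new HBaseTestingUtility();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster();           // in-process ZK + HDFS + master + region server
  }

  @Test
  public void somethingAgainstTheCluster() throws Exception {
    // test body would use testUtil.getConnection() / getAdmin() here
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();        // drives a shutdown sequence like the one logged here
  }
}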
2024-12-08T07:57:38,295 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:57:38,295 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:38,295 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,36933,1733644656827' ***** 2024-12-08T07:57:38,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:38,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:38,295 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T07:57:38,295 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T07:57:38,295 INFO [RS:0;0106a245d0e8:36933 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T07:57:38,295 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T07:57:38,295 INFO [RS:0;0106a245d0e8:36933 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T07:57:38,295 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,36933,1733644656827 2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:36933. 2024-12-08T07:57:38,296 DEBUG [RS:0;0106a245d0e8:36933 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:57:38,296 DEBUG [RS:0;0106a245d0e8:36933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
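Here the region server closes its own async cluster connection; the stack shows the same AsyncConnectionImpl.close() that application code reaches through ConnectionFactory. A hedged sketch of the client-side equivalent, with an illustrative table name ("demo") and row key:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    // try-with-resources guarantees close(), which is what logs
    // "Connection has been closed by ..." in AsyncConnectionImpl
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      conn.getTable(TableName.valueOf("demo"))
          .get(new Get(Bytes.toBytes("row-1")))
          .thenAccept(result -> System.out.println("cells returned: " + result.size()))
          .join();
    }
  }
}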
2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T07:57:38,296 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T07:57:38,296 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T07:57:38,297 DEBUG [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T07:57:38,297 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:57:38,297 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:57:38,297 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:57:38,297 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:57:38,297 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:57:38,297 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T07:57:38,313 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/.tmp/ns/72549f3118aa41369adf75f1bbd517da is 43, key is default/ns:d/1733644658188/Put/seqid=0 2024-12-08T07:57:38,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741835_1011 (size=5153) 2024-12-08T07:57:38,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741835_1011 (size=5153) 2024-12-08T07:57:38,321 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/.tmp/ns/72549f3118aa41369adf75f1bbd517da 2024-12-08T07:57:38,331 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/.tmp/ns/72549f3118aa41369adf75f1bbd517da as hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/ns/72549f3118aa41369adf75f1bbd517da 2024-12-08T07:57:38,339 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/ns/72549f3118aa41369adf75f1bbd517da, entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T07:57:38,340 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-12-08T07:57:38,341 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T07:57:38,347 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T07:57:38,348 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:57:38,348 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:57:38,348 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644658296Running coprocessor pre-close hooks at 1733644658296Disabling compacts and flushes for region at 1733644658296Disabling writes for close at 1733644658297 (+1 ms)Obtaining lock to block concurrent updates at 1733644658297Preparing flush snapshotting stores in 1588230740 at 1733644658297Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733644658297Flushing stores of hbase:meta,,1.1588230740 at 1733644658298 (+1 ms)Flushing 1588230740/ns: creating writer at 1733644658298Flushing 1588230740/ns: appending metadata at 1733644658313 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733644658313Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26db55ee: reopening flushed file at 1733644658329 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1733644658340 (+11 ms)Writing region close event to WAL at 1733644658342 (+2 ms)Running coprocessor post-close hooks at 1733644658348 (+6 ms)Closed at 1733644658348 2024-12-08T07:57:38,348 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T07:57:38,497 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,36933,1733644656827; all regions closed. 
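The close path above flushes the meta memstore to a temporary HFile (.tmp/ns/...), commits it into the ns store, and writes the max sequence id to recovered.edits before the region closes. The same flush machinery can be invoked on demand from a client; a sketch using the Admin API, with an illustrative table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the region servers hosting the table to flush memstores to HFiles,
      // the same store-flusher path that produced the files logged above.
      admin.flush(TableName.valueOf("demo"));
    }
  }
}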
2024-12-08T07:57:38,497 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,498 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,498 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,498 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,498 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741834_1010 (size=1152) 2024-12-08T07:57:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741834_1010 (size=1152) 2024-12-08T07:57:38,504 DEBUG [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/oldWALs 2024-12-08T07:57:38,504 INFO [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C36933%2C1733644656827.meta:.meta(num 1733644658096) 2024-12-08T07:57:38,505 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,505 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,505 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,506 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,506 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741833_1009 (size=93) 2024-12-08T07:57:38,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741833_1009 (size=93) 2024-12-08T07:57:38,513 DEBUG [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/oldWALs 2024-12-08T07:57:38,513 INFO [RS:0;0106a245d0e8:36933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C36933%2C1733644656827:(num 1733644657882) 2024-12-08T07:57:38,513 DEBUG [RS:0;0106a245d0e8:36933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:38,513 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:57:38,513 INFO [RS:0;0106a245d0e8:36933 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:57:38,513 INFO [RS:0;0106a245d0e8:36933 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T07:57:38,513 INFO [RS:0;0106a245d0e8:36933 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:57:38,513 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
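Shutting the server down closes its FSHLog WALs and archives them into the oldWALs directory, which is the behavior this TestLogRolling suite exercises. A roll can also be requested explicitly per region server; a sketch using Admin.rollWALWriter(), assuming a standard client classpath (cluster metrics are used here only to enumerate live servers):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every live region server to roll its write-ahead log; the previous
      // WAL file then becomes eligible for archiving into oldWALs as seen above.
      for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}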
2024-12-08T07:57:38,514 INFO [RS:0;0106a245d0e8:36933 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36933 2024-12-08T07:57:38,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:57:38,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,36933,1733644656827 2024-12-08T07:57:38,526 INFO [RS:0;0106a245d0e8:36933 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:57:38,536 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,36933,1733644656827] 2024-12-08T07:57:38,547 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,36933,1733644656827 already deleted, retry=false 2024-12-08T07:57:38,547 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,36933,1733644656827 expired; onlineServers=0 2024-12-08T07:57:38,547 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,41589,1733644656629' ***** 2024-12-08T07:57:38,547 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T07:57:38,547 INFO [M:0;0106a245d0e8:41589 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:57:38,547 INFO [M:0;0106a245d0e8:41589 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:57:38,547 DEBUG [M:0;0106a245d0e8:41589 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T07:57:38,548 DEBUG [M:0;0106a245d0e8:41589 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T07:57:38,548 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T07:57:38,548 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644657670 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644657670,5,FailOnTimeoutGroup] 2024-12-08T07:57:38,548 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644657669 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644657669,5,FailOnTimeoutGroup] 2024-12-08T07:57:38,548 INFO [M:0;0106a245d0e8:41589 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T07:57:38,548 INFO [M:0;0106a245d0e8:41589 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:57:38,548 DEBUG [M:0;0106a245d0e8:41589 {}] master.HMaster(1795): Stopping service threads 2024-12-08T07:57:38,548 INFO [M:0;0106a245d0e8:41589 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T07:57:38,548 INFO [M:0;0106a245d0e8:41589 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:57:38,548 INFO [M:0;0106a245d0e8:41589 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T07:57:38,548 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T07:57:38,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T07:57:38,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:38,557 DEBUG [M:0;0106a245d0e8:41589 {}] zookeeper.ZKUtil(347): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T07:57:38,557 WARN [M:0;0106a245d0e8:41589 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T07:57:38,558 INFO [M:0;0106a245d0e8:41589 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/.lastflushedseqids 2024-12-08T07:57:38,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741836_1012 (size=99) 2024-12-08T07:57:38,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741836_1012 (size=99) 2024-12-08T07:57:38,565 INFO [M:0;0106a245d0e8:41589 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T07:57:38,565 INFO [M:0;0106a245d0e8:41589 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T07:57:38,565 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:57:38,566 INFO [M:0;0106a245d0e8:41589 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:38,566 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:38,566 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:57:38,566 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:38,566 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-08T07:57:38,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:38,590 DEBUG [M:0;0106a245d0e8:41589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8c1ec6a5dca4f57ab91dfd851561018 is 82, key is hbase:meta,,1/info:regioninfo/1733644658134/Put/seqid=0 2024-12-08T07:57:38,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:38,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741837_1013 (size=5672) 2024-12-08T07:57:38,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741837_1013 (size=5672) 2024-12-08T07:57:38,598 INFO [M:0;0106a245d0e8:41589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8c1ec6a5dca4f57ab91dfd851561018 2024-12-08T07:57:38,619 DEBUG [M:0;0106a245d0e8:41589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/29d7b9e92a2c42158cc6bad9f3d5403e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733644658195/Put/seqid=0 2024-12-08T07:57:38,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741838_1014 (size=5275) 2024-12-08T07:57:38,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741838_1014 (size=5275) 2024-12-08T07:57:38,625 INFO [M:0;0106a245d0e8:41589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/29d7b9e92a2c42158cc6bad9f3d5403e 2024-12-08T07:57:38,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper 
Event, type=None, state=Closed, path=null 2024-12-08T07:57:38,636 INFO [RS:0;0106a245d0e8:36933 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:57:38,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36933-0x100046e26ab0001, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:38,637 INFO [RS:0;0106a245d0e8:36933 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,36933,1733644656827; zookeeper connection closed. 2024-12-08T07:57:38,637 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 2024-12-08T07:57:38,637 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T07:57:38,647 DEBUG [M:0;0106a245d0e8:41589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6410e3a62f594b5cb730d736e5e2485f is 69, key is 0106a245d0e8,36933,1733644656827/rs:state/1733644657704/Put/seqid=0 2024-12-08T07:57:38,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741839_1015 (size=5156) 2024-12-08T07:57:38,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741839_1015 (size=5156) 2024-12-08T07:57:38,655 INFO [M:0;0106a245d0e8:41589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6410e3a62f594b5cb730d736e5e2485f 2024-12-08T07:57:38,678 DEBUG [M:0;0106a245d0e8:41589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d1a170351e594db2b2e53546a7a2a566 is 52, key is load_balancer_on/state:d/1733644658271/Put/seqid=0 2024-12-08T07:57:38,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741840_1016 (size=5056) 2024-12-08T07:57:38,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741840_1016 (size=5056) 2024-12-08T07:57:38,684 INFO [M:0;0106a245d0e8:41589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d1a170351e594db2b2e53546a7a2a566 2024-12-08T07:57:38,691 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8c1ec6a5dca4f57ab91dfd851561018 as 
hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8c1ec6a5dca4f57ab91dfd851561018 2024-12-08T07:57:38,699 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8c1ec6a5dca4f57ab91dfd851561018, entries=8, sequenceid=29, filesize=5.5 K 2024-12-08T07:57:38,700 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/29d7b9e92a2c42158cc6bad9f3d5403e as hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/29d7b9e92a2c42158cc6bad9f3d5403e 2024-12-08T07:57:38,707 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/29d7b9e92a2c42158cc6bad9f3d5403e, entries=3, sequenceid=29, filesize=5.2 K 2024-12-08T07:57:38,709 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6410e3a62f594b5cb730d736e5e2485f as hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6410e3a62f594b5cb730d736e5e2485f 2024-12-08T07:57:38,715 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6410e3a62f594b5cb730d736e5e2485f, entries=1, sequenceid=29, filesize=5.0 K 2024-12-08T07:57:38,717 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d1a170351e594db2b2e53546a7a2a566 as hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d1a170351e594db2b2e53546a7a2a566 2024-12-08T07:57:38,723 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36351/user/jenkins/test-data/13f21311-fc7f-18bf-25f2-4b7d0aae5b37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d1a170351e594db2b2e53546a7a2a566, entries=1, sequenceid=29, filesize=4.9 K 2024-12-08T07:57:38,724 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=29, compaction requested=false 2024-12-08T07:57:38,726 INFO [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:57:38,726 DEBUG [M:0;0106a245d0e8:41589 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644658565Disabling compacts and flushes for region at 1733644658565Disabling writes for close at 1733644658566 (+1 ms)Obtaining lock to block concurrent updates at 1733644658566Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644658566Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733644658567 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733644658567Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644658568 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644658590 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644658590Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644658604 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644658618 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644658618Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644658630 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644658646 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644658646Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644658662 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644658677 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644658677Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48f39d0e: reopening flushed file at 1733644658690 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23aed8b3: reopening flushed file at 1733644658699 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a92cee6: reopening flushed file at 1733644658708 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18b9fb55: reopening flushed file at 1733644658716 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=29, compaction requested=false at 1733644658724 (+8 ms)Writing region close event to WAL at 1733644658726 (+2 ms)Closed at 1733644658726 2024-12-08T07:57:38,726 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,726 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,727 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,727 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:38,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46235 is added to blk_1073741830_1006 (size=10311) 2024-12-08T07:57:38,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35633 is added to blk_1073741830_1006 (size=10311) 2024-12-08T07:57:39,110 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T07:57:39,115 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:39,130 INFO [M:0;0106a245d0e8:41589 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T07:57:39,130 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T07:57:39,130 INFO [M:0;0106a245d0e8:41589 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41589 2024-12-08T07:57:39,131 INFO [M:0;0106a245d0e8:41589 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:57:39,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:39,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:39,321 INFO [M:0;0106a245d0e8:41589 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:57:39,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41589-0x100046e26ab0000, quorum=127.0.0.1:49281, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:57:39,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f5c23ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:39,327 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@716c7b87{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:39,327 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:39,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc8c098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:39,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b915b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:39,330 WARN [BP-1534295543-172.17.0.2-1733644654204 heartbeating to localhost/127.0.0.1:36351 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:39,330 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:57:39,330 WARN [BP-1534295543-172.17.0.2-1733644654204 heartbeating to localhost/127.0.0.1:36351 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1534295543-172.17.0.2-1733644654204 (Datanode Uuid b291005b-25cd-4af2-8e67-6c51982e0316) service to localhost/127.0.0.1:36351 2024-12-08T07:57:39,330 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:39,330 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data3/current/BP-1534295543-172.17.0.2-1733644654204 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:39,331 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data4/current/BP-1534295543-172.17.0.2-1733644654204 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:39,331 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:39,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52b07bdb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:39,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70121b28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:39,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:39,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aa9354f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:39,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8914e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:39,334 WARN [BP-1534295543-172.17.0.2-1733644654204 heartbeating to localhost/127.0.0.1:36351 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:39,335 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
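With the datanode block pools above stopped, the utility reports the minicluster down and immediately starts a fresh one with explicit options (see the StartMiniClusterOption entry that follows: one master, one region server, two datanodes). A sketch of starting a minicluster with those options, again using the 2.x-era HBaseTestingUtility name:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterOptionSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirrors the option string logged below: one master, one region server, two datanodes.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    util.startMiniCluster(option);   // brings up ZK, HDFS and HBase in-process
    try {
      // test code would run against util.getConnection() here
    } finally {
      util.shutdownMiniCluster();    // produces a shutdown sequence like the one above
    }
  }
}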
2024-12-08T07:57:39,335 WARN [BP-1534295543-172.17.0.2-1733644654204 heartbeating to localhost/127.0.0.1:36351 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1534295543-172.17.0.2-1733644654204 (Datanode Uuid aa9f56e8-f9cd-462f-ac75-df8769b6dc73) service to localhost/127.0.0.1:36351 2024-12-08T07:57:39,335 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:39,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data1/current/BP-1534295543-172.17.0.2-1733644654204 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:39,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/cluster_d22af574-953c-6bee-ca6f-ac20b217175f/data/data2/current/BP-1534295543-172.17.0.2-1733644654204 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:39,336 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:39,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@493d1d34{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:57:39,341 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a249094{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:39,341 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:39,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75cbfab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:39,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65506a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:39,346 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T07:57:39,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T07:57:39,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T07:57:39,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.log.dir so I do NOT create it in target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6 2024-12-08T07:57:39,367 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd638054-d905-dc22-79ec-29e87a9c2776/hadoop.tmp.dir so I do NOT create it in target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6 2024-12-08T07:57:39,367 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c, deleteOnExit=true 2024-12-08T07:57:39,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T07:57:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/test.cache.data in system properties and HBase conf 2024-12-08T07:57:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T07:57:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir in system properties and HBase conf 2024-12-08T07:57:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T07:57:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T07:57:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T07:57:39,368 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/nfs.dump.dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:57:39,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T07:57:39,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T07:57:39,380 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:57:39,742 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:57:39,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:39,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:39,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:39,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:39,856 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T07:57:39,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:39,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@692b8c40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:39,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab5393f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:39,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c461833{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-46123-hadoop-hdfs-3_4_1-tests_jar-_-any-2764541037695792820/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:57:39,947 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33e53d1d{HTTP/1.1, (http/1.1)}{localhost:46123} 2024-12-08T07:57:39,947 INFO [Time-limited test {}] server.Server(415): Started @107514ms 2024-12-08T07:57:39,958 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:57:40,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:40,208 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:40,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:40,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:40,209 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:57:40,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a4bf55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:40,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23bb5222{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:40,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5dd91be7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-34445-hadoop-hdfs-3_4_1-tests_jar-_-any-14132647932470052123/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:40,302 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55cdd36{HTTP/1.1, (http/1.1)}{localhost:34445} 2024-12-08T07:57:40,302 INFO [Time-limited test {}] server.Server(415): Started @107870ms 2024-12-08T07:57:40,303 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:40,329 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:40,332 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:40,333 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:40,333 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:40,333 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:57:40,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42443481{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:40,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@144f8866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:40,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24c93faa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-46369-hadoop-hdfs-3_4_1-tests_jar-_-any-16739377411869716318/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:40,426 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e8113bb{HTTP/1.1, (http/1.1)}{localhost:46369} 2024-12-08T07:57:40,426 INFO [Time-limited test {}] server.Server(415): Started @107993ms 2024-12-08T07:57:40,427 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:41,484 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data1/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:41,484 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data2/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:41,514 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa073963a41f5adb1 with lease ID 0x98c7d3aa439fb3ad: Processing first storage report for DS-e35af792-8aa3-4f36-a955-ecf3072f805a from datanode DatanodeRegistration(127.0.0.1:42967, datanodeUuid=08018853-d44f-4a79-a44e-474bbefc64c2, infoPort=35315, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa073963a41f5adb1 with lease ID 0x98c7d3aa439fb3ad: from storage DS-e35af792-8aa3-4f36-a955-ecf3072f805a node DatanodeRegistration(127.0.0.1:42967, datanodeUuid=08018853-d44f-4a79-a44e-474bbefc64c2, infoPort=35315, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa073963a41f5adb1 with lease ID 0x98c7d3aa439fb3ad: Processing first storage report for DS-abc4b15a-8898-4a94-9028-c6e31b60c63a from datanode DatanodeRegistration(127.0.0.1:42967, datanodeUuid=08018853-d44f-4a79-a44e-474bbefc64c2, infoPort=35315, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:41,517 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa073963a41f5adb1 with lease ID 0x98c7d3aa439fb3ad: from storage DS-abc4b15a-8898-4a94-9028-c6e31b60c63a node DatanodeRegistration(127.0.0.1:42967, datanodeUuid=08018853-d44f-4a79-a44e-474bbefc64c2, infoPort=35315, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:41,680 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data3/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:41,680 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data4/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:41,702 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:41,705 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63c05bb7b2b31825 with lease ID 0x98c7d3aa439fb3ae: Processing first storage report for DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8 from datanode DatanodeRegistration(127.0.0.1:42091, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=36731, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:41,705 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63c05bb7b2b31825 with lease ID 0x98c7d3aa439fb3ae: from storage DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8 node DatanodeRegistration(127.0.0.1:42091, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=36731, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:41,705 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63c05bb7b2b31825 with lease ID 0x98c7d3aa439fb3ae: Processing first storage report for DS-af6e152d-a5b0-4184-9035-a2aa7951d0a0 from datanode DatanodeRegistration(127.0.0.1:42091, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=36731, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:41,705 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63c05bb7b2b31825 with lease ID 0x98c7d3aa439fb3ae: from storage DS-af6e152d-a5b0-4184-9035-a2aa7951d0a0 node DatanodeRegistration(127.0.0.1:42091, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=36731, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:41,766 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6 2024-12-08T07:57:41,770 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/zookeeper_0, clientPort=49932, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T07:57:41,772 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49932 2024-12-08T07:57:41,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:41,775 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:57:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:57:41,786 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621 with version=8 2024-12-08T07:57:41,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging 2024-12-08T07:57:41,788 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:57:41,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:41,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:41,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:57:41,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:41,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:57:41,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T07:57:41,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:57:41,790 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33265 2024-12-08T07:57:41,791 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33265 connecting to ZooKeeper ensemble=127.0.0.1:49932 2024-12-08T07:57:41,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:332650x0, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:57:41,850 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33265-0x100046e3ad00000 connected 2024-12-08T07:57:41,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:41,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:41,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:41,945 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621, hbase.cluster.distributed=false 2024-12-08T07:57:41,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:57:41,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33265 2024-12-08T07:57:41,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33265 2024-12-08T07:57:41,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33265 2024-12-08T07:57:41,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33265 2024-12-08T07:57:41,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33265 2024-12-08T07:57:41,969 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T07:57:41,969 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:57:41,970 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43367 2024-12-08T07:57:41,972 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43367 connecting to ZooKeeper ensemble=127.0.0.1:49932 2024-12-08T07:57:41,973 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:41,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:41,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433670x0, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:57:41,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:433670x0, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:57:41,989 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43367-0x100046e3ad00001 connected 2024-12-08T07:57:41,990 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T07:57:41,992 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T07:57:41,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T07:57:41,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:57:41,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43367 2024-12-08T07:57:41,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43367 2024-12-08T07:57:41,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43367 2024-12-08T07:57:41,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43367 2024-12-08T07:57:41,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43367 2024-12-08T07:57:42,004 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:33265 2024-12-08T07:57:42,005 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:42,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:42,010 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T07:57:42,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,021 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T07:57:42,022 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,33265,1733644661788 from backup master directory 2024-12-08T07:57:42,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:42,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:57:42,031 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T07:57:42,031 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,039 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/hbase.id] with ID: 0df9df2c-09b7-49c0-86c0-9d0c94422e3f 2024-12-08T07:57:42,039 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/.tmp/hbase.id 2024-12-08T07:57:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:57:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:57:42,048 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/.tmp/hbase.id]:[hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/hbase.id] 2024-12-08T07:57:42,065 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:42,065 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T07:57:42,068 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
2024-12-08T07:57:42,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:57:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:57:42,089 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:57:42,089 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T07:57:42,090 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:57:42,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:57:42,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:57:42,099 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store 2024-12-08T07:57:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:57:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:57:42,106 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:42,106 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:57:42,106 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:42,106 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:42,106 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:57:42,106 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:57:42,106 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:57:42,106 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644662106Disabling compacts and flushes for region at 1733644662106Disabling writes for close at 1733644662106Writing region close event to WAL at 1733644662106Closed at 1733644662106 2024-12-08T07:57:42,107 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/.initializing 2024-12-08T07:57:42,107 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,110 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C33265%2C1733644661788, suffix=, logDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788, archiveDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/oldWALs, maxLogs=10 2024-12-08T07:57:42,110 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C33265%2C1733644661788.1733644662110 2024-12-08T07:57:42,116 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 2024-12-08T07:57:42,117 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35315:35315),(127.0.0.1/127.0.0.1:36731:36731)] 2024-12-08T07:57:42,118 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:57:42,118 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:42,118 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,118 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T07:57:42,122 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:42,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T07:57:42,124 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:42,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T07:57:42,125 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:42,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T07:57:42,127 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:42,128 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,128 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,129 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,130 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,130 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,131 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T07:57:42,132 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:57:42,134 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:57:42,135 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724846, jitterRate=-0.07831169664859772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T07:57:42,136 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644662118Initializing all the Stores at 1733644662119 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644662119Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644662120 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644662120Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644662120Cleaning up temporary data from old regions at 1733644662130 (+10 ms)Region opened successfully at 1733644662136 (+6 ms) 2024-12-08T07:57:42,136 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T07:57:42,140 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4159c4c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:57:42,141 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T07:57:42,142 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T07:57:42,142 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T07:57:42,142 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T07:57:42,142 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T07:57:42,143 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T07:57:42,143 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T07:57:42,145 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T07:57:42,146 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T07:57:42,157 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T07:57:42,157 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T07:57:42,158 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T07:57:42,167 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T07:57:42,168 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T07:57:42,169 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T07:57:42,178 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T07:57:42,179 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T07:57:42,188 DEBUG 
[master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T07:57:42,192 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T07:57:42,199 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T07:57:42,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:42,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:57:42,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,211 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,33265,1733644661788, sessionid=0x100046e3ad00000, setting cluster-up flag (Was=false) 2024-12-08T07:57:42,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,262 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T07:57:42,264 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,315 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T07:57:42,317 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,33265,1733644661788 2024-12-08T07:57:42,319 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T07:57:42,322 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T07:57:42,322 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T07:57:42,322 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T07:57:42,323 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,33265,1733644661788 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T07:57:42,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:42,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:42,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:42,326 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:57:42,326 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T07:57:42,326 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,326 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:57:42,326 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T07:57:42,327 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644692327 2024-12-08T07:57:42,327 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T07:57:42,327 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T07:57:42,327 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T07:57:42,328 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T07:57:42,328 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T07:57:42,328 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T07:57:42,328 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,328 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T07:57:42,328 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T07:57:42,329 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:57:42,329 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T07:57:42,329 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T07:57:42,329 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T07:57:42,329 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T07:57:42,330 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644662330,5,FailOnTimeoutGroup] 2024-12-08T07:57:42,330 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,331 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T07:57:42,333 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644662330,5,FailOnTimeoutGroup] 2024-12-08T07:57:42,333 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,333 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T07:57:42,333 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,333 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T07:57:42,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:57:42,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:57:42,349 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T07:57:42,373 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621 2024-12-08T07:57:42,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:57:42,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:57:42,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:42,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:57:42,390 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:57:42,390 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:42,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:57:42,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:57:42,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:42,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:57:42,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:57:42,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:42,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:57:42,397 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(746): ClusterId : 0df9df2c-09b7-49c0-86c0-9d0c94422e3f 2024-12-08T07:57:42,397 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T07:57:42,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:57:42,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:42,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:42,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:57:42,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740 2024-12-08T07:57:42,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740 2024-12-08T07:57:42,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:57:42,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:57:42,405 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T07:57:42,411 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T07:57:42,411 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T07:57:42,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:57:42,416 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:57:42,417 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838621, jitterRate=0.06636220216751099}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:57:42,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644662387Initializing all the Stores at 1733644662388 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644662388Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644662388Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644662388Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644662388Cleaning up temporary data from old regions at 1733644662404 (+16 ms)Region opened successfully at 1733644662418 (+14 ms) 2024-12-08T07:57:42,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:57:42,418 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:57:42,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:57:42,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:57:42,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:57:42,419 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-12-08T07:57:42,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644662418Disabling compacts and flushes for region at 1733644662418Disabling writes for close at 1733644662418Writing region close event to WAL at 1733644662419 (+1 ms)Closed at 1733644662419 2024-12-08T07:57:42,422 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T07:57:42,422 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:57:42,422 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T07:57:42,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T07:57:42,422 DEBUG [RS:0;0106a245d0e8:43367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23a87de9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:57:42,424 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:57:42,426 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T07:57:42,436 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:43367 2024-12-08T07:57:42,436 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T07:57:42,436 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T07:57:42,436 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T07:57:42,437 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,33265,1733644661788 with port=43367, startcode=1733644661968 2024-12-08T07:57:42,438 DEBUG [RS:0;0106a245d0e8:43367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T07:57:42,444 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57727, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T07:57:42,445 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33265 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,445 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33265 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,447 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621 2024-12-08T07:57:42,447 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37725 2024-12-08T07:57:42,447 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T07:57:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:57:42,453 DEBUG [RS:0;0106a245d0e8:43367 {}] zookeeper.ZKUtil(111): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,453 WARN [RS:0;0106a245d0e8:43367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T07:57:42,453 INFO [RS:0;0106a245d0e8:43367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:57:42,453 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,454 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,43367,1733644661968] 2024-12-08T07:57:42,460 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T07:57:42,462 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T07:57:42,463 INFO [RS:0;0106a245d0e8:43367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T07:57:42,463 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T07:57:42,465 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T07:57:42,466 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T07:57:42,466 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,466 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,466 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:57:42,467 DEBUG [RS:0;0106a245d0e8:43367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:57:42,468 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T07:57:42,468 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,468 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,468 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,468 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,468 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,43367,1733644661968-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:57:42,485 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T07:57:42,485 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,43367,1733644661968-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,485 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,485 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.Replication(171): 0106a245d0e8,43367,1733644661968 started 2024-12-08T07:57:42,498 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:42,498 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,43367,1733644661968, RpcServer on 0106a245d0e8/172.17.0.2:43367, sessionid=0x100046e3ad00001 2024-12-08T07:57:42,498 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T07:57:42,498 DEBUG [RS:0;0106a245d0e8:43367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,498 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,43367,1733644661968' 2024-12-08T07:57:42,498 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T07:57:42,499 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T07:57:42,499 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T07:57:42,499 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T07:57:42,499 DEBUG [RS:0;0106a245d0e8:43367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,499 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,43367,1733644661968' 2024-12-08T07:57:42,499 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T07:57:42,500 DEBUG 
[RS:0;0106a245d0e8:43367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T07:57:42,500 DEBUG [RS:0;0106a245d0e8:43367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T07:57:42,500 INFO [RS:0;0106a245d0e8:43367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T07:57:42,500 INFO [RS:0;0106a245d0e8:43367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T07:57:42,576 WARN [0106a245d0e8:33265 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T07:57:42,603 INFO [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C43367%2C1733644661968, suffix=, logDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968, archiveDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs, maxLogs=32 2024-12-08T07:57:42,603 INFO [RS:0;0106a245d0e8:43367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.1733644662603 2024-12-08T07:57:42,613 INFO [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 2024-12-08T07:57:42,615 DEBUG [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35315:35315),(127.0.0.1/127.0.0.1:36731:36731)] 2024-12-08T07:57:42,827 DEBUG [0106a245d0e8:33265 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T07:57:42,827 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,43367,1733644661968 2024-12-08T07:57:42,829 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,43367,1733644661968, state=OPENING 2024-12-08T07:57:42,873 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T07:57:42,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:57:42,884 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:57:42,884 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:57:42,884 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:57:42,884 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,43367,1733644661968}] 2024-12-08T07:57:43,038 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T07:57:43,040 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50897, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T07:57:43,045 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T07:57:43,045 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:57:43,048 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C43367%2C1733644661968.meta, suffix=.meta, logDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968, archiveDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs, maxLogs=32 2024-12-08T07:57:43,048 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta 2024-12-08T07:57:43,054 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta 2024-12-08T07:57:43,055 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36731:36731),(127.0.0.1/127.0.0.1:35315:35315)] 2024-12-08T07:57:43,055 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:57:43,056 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T07:57:43,056 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T07:57:43,056 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-08T07:57:43,056 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T07:57:43,056 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:43,056 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T07:57:43,056 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T07:57:43,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:57:43,059 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:57:43,059 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:43,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:43,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:57:43,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:57:43,060 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:43,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:43,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:57:43,061 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:57:43,061 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:43,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:57:43,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:57:43,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:57:43,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:43,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-08T07:57:43,064 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:57:43,064 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740 2024-12-08T07:57:43,066 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740 2024-12-08T07:57:43,067 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:57:43,068 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:57:43,068 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T07:57:43,070 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:57:43,071 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779009, jitterRate=-0.009439602494239807}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:57:43,071 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T07:57:43,072 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644663057Writing region info on filesystem at 1733644663057Initializing all the Stores at 1733644663057Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644663057Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644663058 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644663058Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644663058Cleaning up temporary data from old regions at 1733644663068 (+10 ms)Running coprocessor post-open hooks at 1733644663071 (+3 ms)Region opened successfully at 1733644663072 (+1 ms) 2024-12-08T07:57:43,073 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644663037 2024-12-08T07:57:43,076 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T07:57:43,076 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T07:57:43,077 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,43367,1733644661968 2024-12-08T07:57:43,078 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,43367,1733644661968, state=OPEN 2024-12-08T07:57:43,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:57:43,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:57:43,113 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,43367,1733644661968 2024-12-08T07:57:43,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:57:43,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:57:43,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T07:57:43,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,43367,1733644661968 in 229 msec 2024-12-08T07:57:43,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T07:57:43,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-12-08T07:57:43,122 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:57:43,122 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T07:57:43,124 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:57:43,124 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,43367,1733644661968, seqNum=-1] 2024-12-08T07:57:43,125 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:57:43,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56991, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:57:43,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 813 msec 2024-12-08T07:57:43,135 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644663135, completionTime=-1 2024-12-08T07:57:43,135 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T07:57:43,135 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T07:57:43,138 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T07:57:43,138 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644723138 2024-12-08T07:57:43,138 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733644783138 2024-12-08T07:57:43,138 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T07:57:43,138 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,33265,1733644661788-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,139 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,33265,1733644661788-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,139 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,33265,1733644661788-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,139 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:33265, period=300000, unit=MILLISECONDS is enabled. 
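The entries above show InitMetaProcedure creating the 'default' and 'hbase' namespaces once hbase:meta is online, and the master then waiting for region servers to report in. A minimal client-side sketch (class name and the extra namespace are purely illustrative; assumes an HBase 2.x+ client on the classpath and a reachable ZooKeeper quorum) of inspecting and creating namespaces through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // assumption: ZK-based connection registry, local quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // 'default' and 'hbase' are created by the master during init, as logged above.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // Creating an additional, purely illustrative namespace.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}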
2024-12-08T07:57:43,139 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,140 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,141 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.113sec 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,33265,1733644661788-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:57:43,144 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,33265,1733644661788-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T07:57:43,147 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T07:57:43,147 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T07:57:43,147 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,33265,1733644661788-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T07:57:43,197 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142e8458, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:57:43,197 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,33265,-1 for getting cluster id 2024-12-08T07:57:43,197 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T07:57:43,199 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0df9df2c-09b7-49c0-86c0-9d0c94422e3f' 2024-12-08T07:57:43,199 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T07:57:43,199 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0df9df2c-09b7-49c0-86c0-9d0c94422e3f" 2024-12-08T07:57:43,200 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44eb01bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:57:43,200 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,33265,-1] 2024-12-08T07:57:43,200 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T07:57:43,200 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:57:43,202 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T07:57:43,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ee117d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:57:43,204 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:57:43,205 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,43367,1733644661968, seqNum=-1] 2024-12-08T07:57:43,206 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:57:43,208 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:57:43,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0106a245d0e8,33265,1733644661788 2024-12-08T07:57:43,210 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:43,214 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T07:57:43,236 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:57:43,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:43,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:43,237 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:57:43,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:57:43,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:57:43,237 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T07:57:43,237 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:57:43,238 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42555 2024-12-08T07:57:43,240 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42555 connecting to ZooKeeper ensemble=127.0.0.1:49932 2024-12-08T07:57:43,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:43,243 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:57:43,271 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425550x0, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:57:43,271 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-08T07:57:43,271 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-08T07:57:43,271 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42555-0x100046e3ad00002 connected 2024-12-08T07:57:43,272 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T07:57:43,272 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
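Above, the test client builds an RPC client, resolves the hbase:meta location from the connection registry, confirms the minicluster is up, and then asks the master to disable the balancer ("set balanceSwitch=false"). A rough equivalent using the public client API follows; the quorum address and client port are taken from this run and are assumptions anywhere else, and the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // ensemble used by this minicluster
    conf.setInt("hbase.zookeeper.property.clientPort", 49932); // port from the log; assumption outside this run
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Disable the balancer synchronously, mirroring the balanceSwitch=false entry above.
      boolean previous = admin.balancerSwitch(false, true);
      System.out.println("balancer was previously " + (previous ? "on" : "off"));
    }
  }
}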
2024-12-08T07:57:43,273 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T07:57:43,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:57:43,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42555 2024-12-08T07:57:43,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42555 2024-12-08T07:57:43,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42555 2024-12-08T07:57:43,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42555 2024-12-08T07:57:43,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42555 2024-12-08T07:57:43,276 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(746): ClusterId : 0df9df2c-09b7-49c0-86c0-9d0c94422e3f 2024-12-08T07:57:43,276 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T07:57:43,284 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T07:57:43,284 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T07:57:43,295 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T07:57:43,295 DEBUG [RS:1;0106a245d0e8:42555 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70e08ad3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:57:43,310 DEBUG [RS:1;0106a245d0e8:42555 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0106a245d0e8:42555 2024-12-08T07:57:43,310 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T07:57:43,310 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T07:57:43,310 DEBUG [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(832): About to register with Master. 
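The RpcExecutor lines above (handlerCount=3, maxQueueLength=30, a read/write split on the priority queue) are driven by server-side configuration. A hedged sketch of the knobs commonly involved; the values only mirror what this run logs, the exact defaults vary by version, and mapping read.ratio onto the priority queue specifically is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Number of general RPC handler threads per region server (3 in this test run).
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Maximum queued calls per call queue; the log shows maxQueueLength=30.
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    // Read/write split for executors built on RWQueueRpcExecutor (see the readQueues/writeQueues line above).
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", -1));
  }
}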
2024-12-08T07:57:43,311 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,33265,1733644661788 with port=42555, startcode=1733644663236 2024-12-08T07:57:43,311 DEBUG [RS:1;0106a245d0e8:42555 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T07:57:43,312 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58719, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T07:57:43,313 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33265 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,42555,1733644663236 2024-12-08T07:57:43,313 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33265 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,42555,1733644663236 2024-12-08T07:57:43,314 DEBUG [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621 2024-12-08T07:57:43,314 DEBUG [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37725 2024-12-08T07:57:43,314 DEBUG [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T07:57:43,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:57:43,326 DEBUG [RS:1;0106a245d0e8:42555 {}] zookeeper.ZKUtil(111): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,42555,1733644663236 2024-12-08T07:57:43,326 WARN [RS:1;0106a245d0e8:42555 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T07:57:43,326 INFO [RS:1;0106a245d0e8:42555 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:57:43,326 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,42555,1733644663236] 2024-12-08T07:57:43,326 DEBUG [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236 2024-12-08T07:57:43,330 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T07:57:43,334 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T07:57:43,334 INFO [RS:1;0106a245d0e8:42555 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T07:57:43,334 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
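The MemStoreFlusher entry above (globalMemStoreLimit=880 M, low mark 836 M) corresponds to the usual heap-fraction settings for the aggregate memstore. A sketch of those knobs; the property names are the standard ones, while the concrete megabyte figures depend on this JVM's heap and are not reproduced exactly here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Upper bound: fraction of the region server heap usable by all memstores (default 0.4).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the upper bound (default 0.95), i.e. 836 M of 880 M above.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    long heap = Runtime.getRuntime().maxMemory();
    double upper = heap * conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
    System.out.printf("global memstore limit ~= %.0f MB%n", upper / (1024 * 1024));
  }
}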
2024-12-08T07:57:43,335 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T07:57:43,336 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T07:57:43,336 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,336 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,336 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,337 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,337 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,337 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,337 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:57:43,337 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,337 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,338 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,338 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,338 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,338 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:57:43,338 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:57:43,338 DEBUG [RS:1;0106a245d0e8:42555 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:57:43,339 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T07:57:43,339 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,339 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,339 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,339 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,339 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,42555,1733644663236-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:57:43,351 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T07:57:43,351 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,42555,1733644663236-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,351 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,351 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.Replication(171): 0106a245d0e8,42555,1733644663236 started 2024-12-08T07:57:43,362 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:57:43,362 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,42555,1733644663236, RpcServer on 0106a245d0e8/172.17.0.2:42555, sessionid=0x100046e3ad00002 2024-12-08T07:57:43,362 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T07:57:43,362 DEBUG [RS:1;0106a245d0e8:42555 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,42555,1733644663236 2024-12-08T07:57:43,362 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,42555,1733644663236' 2024-12-08T07:57:43,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;0106a245d0e8:42555,5,FailOnTimeoutGroup] 2024-12-08T07:57:43,362 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T07:57:43,362 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-08T07:57:43,363 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T07:57:43,363 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T07:57:43,363 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T07:57:43,363 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T07:57:43,363 DEBUG [RS:1;0106a245d0e8:42555 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
0106a245d0e8,42555,1733644663236 2024-12-08T07:57:43,363 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,42555,1733644663236' 2024-12-08T07:57:43,363 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T07:57:43,364 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T07:57:43,364 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 0106a245d0e8,33265,1733644661788 2024-12-08T07:57:43,364 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3ee6ce85 2024-12-08T07:57:43,364 DEBUG [RS:1;0106a245d0e8:42555 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T07:57:43,364 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T07:57:43,364 INFO [RS:1;0106a245d0e8:42555 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T07:57:43,364 INFO [RS:1;0106a245d0e8:42555 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T07:57:43,366 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T07:57:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T07:57:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
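The TableDescriptorChecker warnings above fire because this run uses a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) to force frequent flushes and rolls. A sketch of a create call along these lines that would trip the same checks (whether they warn or reject depends on hbase.table.sanity.checks); table and family names match this run, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTinyTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)     // far below sane production values -> MAX_FILESIZE warning
        .setMemStoreFlushSize(8192L) // forces very frequent flushes -> MEMSTORE_FLUSHSIZE warning
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc);
    }
  }
}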
2024-12-08T07:57:43,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:57:43,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T07:57:43,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T07:57:43,369 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:43,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-08T07:57:43,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:57:43,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T07:57:43,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741835_1011 (size=393) 2024-12-08T07:57:43,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741835_1011 (size=393) 2024-12-08T07:57:43,380 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 00c40378502d1ce97c64d133305e3b9a, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621 2024-12-08T07:57:43,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42967 is added to blk_1073741836_1012 (size=76) 2024-12-08T07:57:43,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42091 is added to blk_1073741836_1012 (size=76) 2024-12-08T07:57:43,390 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:43,390 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 00c40378502d1ce97c64d133305e3b9a, disabling compactions & flushes 2024-12-08T07:57:43,390 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,390 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,390 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. after waiting 0 ms 2024-12-08T07:57:43,390 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,390 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,390 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 00c40378502d1ce97c64d133305e3b9a: Waiting for close lock at 1733644663390Disabling compacts and flushes for region at 1733644663390Disabling writes for close at 1733644663390Writing region close event to WAL at 1733644663390Closed at 1733644663390 2024-12-08T07:57:43,391 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T07:57:43,392 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733644663391"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644663391"}]},"ts":"1733644663391"} 2024-12-08T07:57:43,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
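Once CREATE_TABLE_ADD_TO_META has written the regioninfo and state columns shown above, the new region is visible to clients. A small sketch of verifying that from the client side (same hedged connection setup as the earlier sketches; the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Expect a single region with empty start/end keys for a freshly created, unsplit table.
      for (RegionInfo region : admin.getRegions(name)) {
        System.out.println(region.getRegionNameAsString()
            + " encoded=" + region.getEncodedName());
      }
    }
  }
}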
2024-12-08T07:57:43,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T07:57:43,396 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644663396"}]},"ts":"1733644663396"} 2024-12-08T07:57:43,398 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-08T07:57:43,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00c40378502d1ce97c64d133305e3b9a, ASSIGN}] 2024-12-08T07:57:43,400 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00c40378502d1ce97c64d133305e3b9a, ASSIGN 2024-12-08T07:57:43,401 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00c40378502d1ce97c64d133305e3b9a, ASSIGN; state=OFFLINE, location=0106a245d0e8,43367,1733644661968; forceNewPlan=false, retain=false 2024-12-08T07:57:43,467 INFO [RS:1;0106a245d0e8:42555 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C42555%2C1733644663236, suffix=, logDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236, archiveDir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs, maxLogs=32 2024-12-08T07:57:43,469 INFO [RS:1;0106a245d0e8:42555 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C42555%2C1733644663236.1733644663469 2024-12-08T07:57:43,478 INFO [RS:1;0106a245d0e8:42555 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 2024-12-08T07:57:43,483 DEBUG [RS:1;0106a245d0e8:42555 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36731:36731),(127.0.0.1/127.0.0.1:35315:35315)] 2024-12-08T07:57:43,552 INFO [0106a245d0e8:33265 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
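The AbstractFSWAL line above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the new region server's WAL. Those figures come from configuration roughly as sketched below; the property names are the standard ones, while the fallback defaults shown (and whether an unset block size is derived from the filesystem's block size) are version-dependent assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size; if unset, HBase typically derives it from the underlying filesystem's block size.
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll the WAL once it reaches blocksize * multiplier (0.5 -> the 128 MB rollsize logged above).
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on un-archived WAL files before flushes are forced (maxLogs=32 above).
    int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n",
        blockSize, (long) (blockSize * multiplier), maxLogs);
  }
}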
2024-12-08T07:57:43,552 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=00c40378502d1ce97c64d133305e3b9a, regionState=OPENING, regionLocation=0106a245d0e8,43367,1733644661968 2024-12-08T07:57:43,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00c40378502d1ce97c64d133305e3b9a, ASSIGN because future has completed 2024-12-08T07:57:43,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 00c40378502d1ce97c64d133305e3b9a, server=0106a245d0e8,43367,1733644661968}] 2024-12-08T07:57:43,719 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,720 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 00c40378502d1ce97c64d133305e3b9a, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:57:43,720 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,720 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:57:43,721 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,721 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,723 INFO [StoreOpener-00c40378502d1ce97c64d133305e3b9a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,724 INFO [StoreOpener-00c40378502d1ce97c64d133305e3b9a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 00c40378502d1ce97c64d133305e3b9a columnFamilyName info 2024-12-08T07:57:43,724 DEBUG [StoreOpener-00c40378502d1ce97c64d133305e3b9a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:57:43,725 INFO [StoreOpener-00c40378502d1ce97c64d133305e3b9a-1 {}] regionserver.HStore(327): Store=00c40378502d1ce97c64d133305e3b9a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:57:43,725 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,726 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,727 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,727 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,727 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,730 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,733 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:57:43,734 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 00c40378502d1ce97c64d133305e3b9a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779232, jitterRate=-0.009156063199043274}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T07:57:43,734 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:57:43,736 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 00c40378502d1ce97c64d133305e3b9a: Running coprocessor pre-open hook at 1733644663721Writing region info on filesystem at 1733644663721Initializing all the Stores at 1733644663722 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644663722Cleaning up temporary data from old regions at 1733644663727 (+5 ms)Running coprocessor post-open hooks at 1733644663734 (+7 ms)Region opened successfully at 1733644663736 (+2 ms) 2024-12-08T07:57:43,737 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a., pid=6, masterSystemTime=1733644663709 2024-12-08T07:57:43,739 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,739 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:43,740 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=00c40378502d1ce97c64d133305e3b9a, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,43367,1733644661968 2024-12-08T07:57:43,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 00c40378502d1ce97c64d133305e3b9a, server=0106a245d0e8,43367,1733644661968 because future has completed 2024-12-08T07:57:43,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T07:57:43,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 00c40378502d1ce97c64d133305e3b9a, server=0106a245d0e8,43367,1733644661968 in 188 msec 2024-12-08T07:57:43,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T07:57:43,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00c40378502d1ce97c64d133305e3b9a, ASSIGN in 348 msec 2024-12-08T07:57:43,750 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T07:57:43,750 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644663750"}]},"ts":"1733644663750"} 2024-12-08T07:57:43,752 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-08T07:57:43,753 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T07:57:43,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 387 msec 2024-12-08T07:57:48,559 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T07:57:48,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:48,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:48,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:48,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:57:48,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T07:57:48,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-08T07:57:48,588 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-08T07:57:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33265 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:57:53,452 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-08T07:57:53,452 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-08T07:57:53,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T07:57:53,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:57:53,472 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:53,475 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:53,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:53,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:53,475 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:57:53,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c68f920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:53,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65349436{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:53,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@304af6f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-45847-hadoop-hdfs-3_4_1-tests_jar-_-any-2485806075344088452/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:53,576 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41559526{HTTP/1.1, (http/1.1)}{localhost:45847} 2024-12-08T07:57:53,577 INFO [Time-limited test {}] server.Server(415): Started @121144ms 2024-12-08T07:57:53,578 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:53,603 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:53,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:53,606 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:53,606 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:53,607 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T07:57:53,607 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3904e150{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:53,607 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@514acf4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:53,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4009f856{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-35579-hadoop-hdfs-3_4_1-tests_jar-_-any-6468919019510079693/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:53,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b4117c9{HTTP/1.1, (http/1.1)}{localhost:35579} 2024-12-08T07:57:53,697 INFO [Time-limited test {}] server.Server(415): Started @121265ms 2024-12-08T07:57:53,698 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:53,725 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:57:53,728 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:57:53,729 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:57:53,729 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:57:53,729 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:57:53,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e1ad43e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:57:53,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@136e75a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:57:53,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@638f230f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-33127-hadoop-hdfs-3_4_1-tests_jar-_-any-1194676277730956926/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:53,818 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69bcca{HTTP/1.1, (http/1.1)}{localhost:33127} 2024-12-08T07:57:53,818 INFO [Time-limited test {}] server.Server(415): Started @121386ms 2024-12-08T07:57:53,819 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:57:54,966 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:54,966 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:54,989 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:54,991 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83f4488d68ba5f5e with lease ID 0x98c7d3aa439fb3af: Processing first storage report for DS-c578cac0-fb58-4498-8432-04367948e478 from datanode DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:54,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83f4488d68ba5f5e with lease ID 0x98c7d3aa439fb3af: from storage DS-c578cac0-fb58-4498-8432-04367948e478 node DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:54,991 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83f4488d68ba5f5e with lease ID 0x98c7d3aa439fb3af: Processing first storage report for DS-d73018ae-46c0-4e7d-91ff-4120f6a7355f from datanode DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:54,991 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83f4488d68ba5f5e with lease ID 0x98c7d3aa439fb3af: from storage DS-d73018ae-46c0-4e7d-91ff-4120f6a7355f node DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:55,125 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data7/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:55,125 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data8/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:55,145 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8e9b8243a8171fc with lease ID 0x98c7d3aa439fb3b0: Processing first storage report for DS-397f1186-2937-4e67-95f3-b674324c9d6c from datanode DatanodeRegistration(127.0.0.1:46459, datanodeUuid=835a85d9-37ea-4994-a27d-87f31023e8c1, infoPort=36125, infoSecurePort=0, ipcPort=38505, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8e9b8243a8171fc with lease ID 0x98c7d3aa439fb3b0: from storage DS-397f1186-2937-4e67-95f3-b674324c9d6c node DatanodeRegistration(127.0.0.1:46459, datanodeUuid=835a85d9-37ea-4994-a27d-87f31023e8c1, infoPort=36125, infoSecurePort=0, ipcPort=38505, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8e9b8243a8171fc with lease ID 0x98c7d3aa439fb3b0: Processing first storage report for DS-2f48b163-a00a-404e-906b-1008fe8f240a from datanode DatanodeRegistration(127.0.0.1:46459, datanodeUuid=835a85d9-37ea-4994-a27d-87f31023e8c1, infoPort=36125, infoSecurePort=0, ipcPort=38505, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8e9b8243a8171fc with lease ID 0x98c7d3aa439fb3b0: from storage DS-2f48b163-a00a-404e-906b-1008fe8f240a node DatanodeRegistration(127.0.0.1:46459, datanodeUuid=835a85d9-37ea-4994-a27d-87f31023e8c1, infoPort=36125, infoSecurePort=0, ipcPort=38505, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:55,307 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data9/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:55,307 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data10/current/BP-997826538-172.17.0.2-1733644659391/current, will proceed with Du for space computation calculation, 2024-12-08T07:57:55,327 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:57:55,329 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c7905a6d38bb55a with lease ID 0x98c7d3aa439fb3b1: Processing first storage report for DS-4310500d-8ce5-4c64-9946-9148c0439e10 from datanode DatanodeRegistration(127.0.0.1:44111, datanodeUuid=a5c7702b-eb77-46f1-89eb-e3a9b6af1282, infoPort=35363, infoSecurePort=0, ipcPort=33543, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:55,330 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c7905a6d38bb55a with lease ID 0x98c7d3aa439fb3b1: from storage DS-4310500d-8ce5-4c64-9946-9148c0439e10 node DatanodeRegistration(127.0.0.1:44111, datanodeUuid=a5c7702b-eb77-46f1-89eb-e3a9b6af1282, infoPort=35363, infoSecurePort=0, ipcPort=33543, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:55,330 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c7905a6d38bb55a with lease ID 0x98c7d3aa439fb3b1: Processing first storage report for DS-d39d2c87-4fc0-438b-97a5-d53801d79839 from datanode DatanodeRegistration(127.0.0.1:44111, datanodeUuid=a5c7702b-eb77-46f1-89eb-e3a9b6af1282, infoPort=35363, infoSecurePort=0, ipcPort=33543, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391) 2024-12-08T07:57:55,330 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c7905a6d38bb55a with lease ID 0x98c7d3aa439fb3b1: from storage DS-d39d2c87-4fc0-438b-97a5-d53801d79839 node DatanodeRegistration(127.0.0.1:44111, datanodeUuid=a5c7702b-eb77-46f1-89eb-e3a9b6af1282, infoPort=35363, infoSecurePort=0, ipcPort=33543, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:57:55,351 WARN [ResponseProcessor for block BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,351 WARN [ResponseProcessor for block BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:57:55,352 WARN [ResponseProcessor for block BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,352 WARN [ResponseProcessor for block BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,352 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 block BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:57:55,352 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 block BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:57:55,352 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta block BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:57:55,352 WARN [PacketResponder: BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42091] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,353 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 block BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:57:55,353 WARN [PacketResponder: BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42091] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1732916473_22 at /127.0.0.1:58374 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58374 dst: /127.0.0.1:42967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:40570 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40570 dst: /127.0.0.1:42091 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_954362276_22 at /127.0.0.1:40600 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40600 dst: /127.0.0.1:42091 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:58416 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58416 dst: /127.0.0.1:42967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:58408 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58408 dst: /127.0.0.1:42967 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1732916473_22 at /127.0.0.1:40520 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40520 dst: /127.0.0.1:42091 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:57:55,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_954362276_22 at /127.0.0.1:58448 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58448 dst: /127.0.0.1:42967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:40562 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40562 dst: /127.0.0.1:42091 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,356 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24c93faa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:55,356 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e8113bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:55,356 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:55,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@144f8866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:55,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42443481{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:55,358 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:55,358 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:57:55,358 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997826538-172.17.0.2-1733644659391 (Datanode Uuid 680b3943-ef0b-4b4c-af1f-96c7df4955d5) service to localhost/127.0.0.1:37725 2024-12-08T07:57:55,358 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:55,358 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data3/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:55,358 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data4/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:55,359 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:55,359 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 block BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:57:55,362 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@4680a3c2 {}] datanode.DataXceiver(331): 127.0.0.1:42967:DataXceiver error processing unknown operation src: /127.0.0.1:39766 dst: /127.0.0.1:42967 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:55,362 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 block BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,362 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta block BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,362 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 block BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:57:55,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5dd91be7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:55,363 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55cdd36{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:55,364 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:55,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23bb5222{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:55,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a4bf55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:55,365 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:55,365 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T07:57:55,365 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997826538-172.17.0.2-1733644659391 (Datanode Uuid 08018853-d44f-4a79-a44e-474bbefc64c2) service to localhost/127.0.0.1:37725 2024-12-08T07:57:55,365 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:55,365 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data1/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:55,365 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data2/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:55,366 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:55,368 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a., hostname=0106a245d0e8,43367,1733644661968, seqNum=2] 2024-12-08T07:57:55,370 ERROR [FSHLog-0-hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621-prefix:0106a245d0e8,43367,1733644661968 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,370 WARN [FSHLog-0-hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621-prefix:0106a245d0e8,43367,1733644661968 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,370 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C43367%2C1733644661968:(num 1733644662603) roll requested 2024-12-08T07:57:55,370 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.1733644675370 2024-12-08T07:57:55,376 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:55,376 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:55,377 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:55,377 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:55,377 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:55,377 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644675370 2024-12-08T07:57:55,377 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,378 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:55,378 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36125:36125),(127.0.0.1/127.0.0.1:35363:35363)] 2024-12-08T07:57:55,378 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time 2024-12-08T07:57:55,379 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-08T07:57:55,379 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-08T07:57:55,379 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 2024-12-08T07:57:55,382 WARN [IPC Server handler 3 on default port 37725 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-08T07:57:55,385 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 after 4ms 2024-12-08T07:57:56,091 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:57,340 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:57,378 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:57,379 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644675370 2024-12-08T07:57:57,380 WARN [ResponseProcessor for block BP-997826538-172.17.0.2-1733644659391:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-997826538-172.17.0.2-1733644659391:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:57,380 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644675370 block BP-997826538-172.17.0.2-1733644659391:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 
2024-12-08T07:57:57,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:59346 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:46459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59346 dst: /127.0.0.1:46459 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:57,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:53706 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53706 dst: /127.0.0.1:44111 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:57,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4009f856{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:57:57,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b4117c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:57:57,408 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:57:57,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@514acf4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:57:57,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3904e150{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:57:57,409 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:57:57,409 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
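The Jetty context shutdowns and the "Ending block pool service" messages above are the test tearing down one of the mini-cluster datanodes, which is the fault that TestLogRolling.testLogRollOnDatanodeDeath injects. A self-contained sketch of the same kind of fault injection against a fresh MiniDFSCluster (the cluster size is illustrative and this is not the test's own setup code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A small in-process HDFS cluster, similar in spirit to the one behind this log.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .build();
    try {
      cluster.waitActive();
      // Stopping a datanode tears down its DataXceiver threads, which is what
      // produces the ClosedChannelException / "Premature EOF from inputStream"
      // errors on the peers that still hold open write pipelines to it.
      cluster.stopDataNode(0);
    } finally {
      cluster.shutdown();
    }
  }
}

From the surviving writers' point of view the stopped node simply vanishes mid-pipeline, which is why the pipeline recovery and WAL roll activity continues on either side of this shutdown sequence.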
2024-12-08T07:57:57,410 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997826538-172.17.0.2-1733644659391 (Datanode Uuid 835a85d9-37ea-4994-a27d-87f31023e8c1) service to localhost/127.0.0.1:37725 2024-12-08T07:57:57,410 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:57:57,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data7/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:57,411 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data8/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:57:57,411 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:57:58,091 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:59,340 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:59,379 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:59,379 WARN [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]] 2024-12-08T07:57:59,379 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C43367%2C1733644661968:(num 1733644675370) roll requested 2024-12-08T07:57:59,380 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.1733644679380 2024-12-08T07:57:59,386 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 after 4007ms 2024-12-08T07:57:59,388 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42967 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:59,388 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51800 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741839_1021 to mirror 127.0.0.1:42967 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:59,388 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad. 2024-12-08T07:57:59,389 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741839_1021 2024-12-08T07:57:59,389 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51800 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T07:57:59,389 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51800 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51800 dst: /127.0.0.1:34057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:59,391 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK] 2024-12-08T07:57:59,396 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46459 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:57:59,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51804 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741840_1022 to mirror 127.0.0.1:46459 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:59,396 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 2024-12-08T07:57:59,396 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51804 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T07:57:59,396 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741840_1022 2024-12-08T07:57:59,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51804 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51804 dst: /127.0.0.1:34057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:57:59,397 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:57:59,401 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:59,401 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:59,402 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:59,402 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:59,402 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:57:59,402 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644675370 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644679380 2024-12-08T07:57:59,403 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35363:35363),(127.0.0.1/127.0.0.1:40587:40587)] 2024-12-08T07:57:59,403 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time 2024-12-08T07:57:59,403 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644675370 is not closed yet, will try archiving it next time 2024-12-08T07:57:59,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44111 is added to blk_1073741838_1020 (size=2431) 2024-12-08T07:57:59,417 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T07:57:59,805 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time 2024-12-08T07:58:00,092 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:01,341 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:01,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741838_1020 (size=2431) 2024-12-08T07:58:01,403 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:01,421 WARN [ResponseProcessor for block BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1023 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
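The roll a few entries above is triggered by the warning "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL": the log roller notices that the WAL's current write pipeline has shrunk below the tolerated minimum and asks for a new writer. The check essentially amounts to asking the HDFS output stream how many replicas its current block still has; a rough sketch, with the minimum passed in explicitly instead of being read from HBase configuration:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

final class LowReplicationCheckSketch {

  /**
   * Number of live replicas in the current write pipeline, or -1 if the
   * stream is not an HDFS stream and does not expose pipeline state.
   */
  static int currentReplicas(FSDataOutputStream out) throws IOException {
    return (out instanceof HdfsDataOutputStream)
        ? ((HdfsDataOutputStream) out).getCurrentBlockReplication()
        : -1;
  }

  /**
   * A roll is requested once the pipeline drops below the tolerated minimum,
   * which is what the "Found 1 replicas but expecting no less than 2 replicas"
   * warning above reports.
   */
  static boolean shouldRequestRoll(FSDataOutputStream out, int minTolerableReplicas)
      throws IOException {
    int live = currentReplicas(out);
    return live >= 0 && live < minTolerableReplicas;
  }
}

In the log the roll itself succeeds: a new writer is created on the two surviving datanodes and the old file is left to be archived "next time".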
2024-12-08T07:58:01,421 WARN [DataStreamer for file /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644679380 block BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:01,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:51810 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51810 dst: /127.0.0.1:34057 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:01,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:53728 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:44111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53728 dst: /127.0.0.1:44111 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:01,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@638f230f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:01,451 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69bcca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:01,451 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:01,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@136e75a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:01,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e1ad43e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:01,452 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:01,452 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997826538-172.17.0.2-1733644659391 (Datanode Uuid a5c7702b-eb77-46f1-89eb-e3a9b6af1282) service to localhost/127.0.0.1:37725 2024-12-08T07:58:01,452 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:58:01,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:01,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data9/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:01,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data10/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:01,453 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:01,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43367 {}] regionserver.HRegion(8855): Flush requested on 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:58:01,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T07:58:01,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/4cabce9aeb34416988361e0dd4372374 is 1080, key is row0002/info:/1733644677412/Put/seqid=0 2024-12-08T07:58:01,502 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:01,502 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 
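The flush requested above comes from ordinary client writes: the biggest cell reported by the HFile writer is row0002 in family "info", so the test driver has been putting small rows into TestLogRolling-testLogRollOnDatanodeDeath and the region asked for a flush once enough data accumulated. A standalone sketch of that write-then-flush pattern (the table name and row layout come from the log; connection details and value sizes are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Rows row0002..row0006 with a ~1 KB value in family "info", matching the
      // "key is row0002/info:/..." cell the flush writer reports above.
      for (int i = 2; i <= 6; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
        table.put(put);
      }
      // Force the memstore to disk: the region server writes a temporary HFile
      // under .tmp/ and then commits it into the column family directory.
      admin.flush(name);
    }
  }
}

The result is visible a little further on: an HFile with entries=5 at sequenceid=11 (about 10 K) is committed from .tmp/ into info/ despite the degraded pipeline.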
2024-12-08T07:58:01,502 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741842_1025 2024-12-08T07:58:01,503 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:58:01,504 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:01,504 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad. 2024-12-08T07:58:01,504 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741843_1026 2024-12-08T07:58:01,505 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK] 2024-12-08T07:58:01,507 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42091 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:01,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44644 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741844_1027 to mirror 127.0.0.1:42091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:01,507 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:58:01,507 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44644 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T07:58:01,507 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741844_1027 2024-12-08T07:58:01,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44644 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44644 dst: /127.0.0.1:34057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:01,508 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:01,510 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:01,510 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44646 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741845_1028 to mirror 127.0.0.1:44111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:58:01,511 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:01,511 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741845_1028 2024-12-08T07:58:01,511 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44646 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T07:58:01,511 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44646 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44646 dst: /127.0.0.1:34057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
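In this stretch every pipeline that includes 127.0.0.1:42967, 127.0.0.1:46459, 127.0.0.1:42091 or 127.0.0.1:44111 fails with "Connection refused" or "is bad" and the block is abandoned, leaving 127.0.0.1:34057 as the only datanode still accepting these writes while the client wants two replicas. One way to confirm that from the client side is to ask the NameNode for its live-datanode report; a sketch, assuming a DistributedFileSystem handle (note that a freshly killed datanode can linger in the LIVE list until its heartbeats time out):

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

final class LiveDatanodeReportSketch {

  /** Print the datanodes the NameNode currently considers live. */
  static void printLiveDatanodes(DistributedFileSystem dfs) throws IOException {
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    System.out.println("live datanodes: " + live.length);
    for (DatanodeInfo dn : live) {
      // host:port used for block transfers, e.g. 127.0.0.1:34057 in this log
      System.out.println("  " + dn.getXferAddr());
    }
  }
}

With effectively one live target, the "Failed to place enough replicas, still in need of 1 to reach 2" warnings that follow are the expected outcome of the default block placement policy.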
2024-12-08T07:58:01,512 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:01,513 WARN [IPC Server handler 0 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T07:58:01,513 WARN [IPC Server handler 0 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T07:58:01,513 WARN [IPC Server handler 0 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T07:58:01,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741846_1029 (size=10347) 2024-12-08T07:58:01,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/4cabce9aeb34416988361e0dd4372374 2024-12-08T07:58:01,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/4cabce9aeb34416988361e0dd4372374 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/4cabce9aeb34416988361e0dd4372374 2024-12-08T07:58:01,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/4cabce9aeb34416988361e0dd4372374, entries=5, sequenceid=11, filesize=10.1 K 2024-12-08T07:58:01,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 00c40378502d1ce97c64d133305e3b9a in 472ms, sequenceid=11, compaction requested=false 2024-12-08T07:58:01,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00c40378502d1ce97c64d133305e3b9a: 2024-12-08T07:58:02,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43367 {}] 
regionserver.HRegion(8855): Flush requested on 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:58:02,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-08T07:58:02,092 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:02,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/62b2aff2a6f844c1b97f9c010be2f629 is 1080, key is row0007/info:/1733644681466/Put/seqid=0 2024-12-08T07:58:02,099 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:02,099 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:02,099 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741847_1030 2024-12-08T07:58:02,100 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:02,101 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:02,102 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 2024-12-08T07:58:02,102 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741848_1031 2024-12-08T07:58:02,102 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:58:02,104 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:02,104 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad. 2024-12-08T07:58:02,104 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741849_1032 2024-12-08T07:58:02,105 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK] 2024-12-08T07:58:02,106 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:02,107 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:58:02,107 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741850_1033 2024-12-08T07:58:02,107 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:02,108 WARN [IPC Server handler 2 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T07:58:02,108 WARN [IPC Server handler 2 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T07:58:02,109 WARN [IPC Server handler 2 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T07:58:02,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741851_1034 (size=12506) 2024-12-08T07:58:02,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/62b2aff2a6f844c1b97f9c010be2f629 
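Annotation: the repeated createBlockOutputStream failures above are the HDFS client's DataStreamer cycling through pipeline candidates while most datanodes are down: each ConnectException is answered by abandoning the allocated block, excluding the unreachable datanode, and asking the NameNode for a fresh block, until only the surviving datanode (127.0.0.1:34057) is left and the flush lands with a single replica. The following is a minimal sketch of how that create-path pattern can be reproduced outside this test, assuming hadoop-hdfs test artifacts on the classpath; the class and file names are illustrative and not taken from the test itself, and the exact WARN sequence depends on heartbeat timing.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class PipelineExclusionRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2); // same target replication as in the log
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // Kill one of the two datanodes. Until heartbeats time out the NameNode still
      // reports it as live, so it can be handed out in new block pipelines.
      cluster.stopDataNode(1);
      // Allocating a new block now typically reproduces the pattern above:
      // ConnectException, "Abandoning blk_...", "Excluding datanode ...", and
      // "Failed to place enough replicas" on the NameNode side.
      try (FSDataOutputStream out = fs.create(new Path("/repro/wal-like-file"), (short) 2)) {
        out.write(new byte[8 * 1024]);
        out.hflush();
      }
    } finally {
      cluster.shutdown();
    }
  }
}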
2024-12-08T07:58:02,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/62b2aff2a6f844c1b97f9c010be2f629 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629 2024-12-08T07:58:02,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629, entries=7, sequenceid=24, filesize=12.2 K 2024-12-08T07:58:02,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 00c40378502d1ce97c64d133305e3b9a in 439ms, sequenceid=24, compaction requested=false 2024-12-08T07:58:02,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00c40378502d1ce97c64d133305e3b9a: 2024-12-08T07:58:02,532 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-08T07:58:02,532 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:02,532 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629 because midkey is the same as first or last row 2024-12-08T07:58:03,341 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,404 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,404 WARN [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]] 2024-12-08T07:58:03,404 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C43367%2C1733644661968:(num 1733644679380) roll requested 2024-12-08T07:58:03,405 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.1733644683404 2024-12-08T07:58:03,408 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,408 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:58:03,408 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741852_1035 2024-12-08T07:58:03,409 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:03,411 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,411 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 2024-12-08T07:58:03,412 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741853_1036 2024-12-08T07:58:03,412 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:58:03,414 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,414 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad. 2024-12-08T07:58:03,414 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741854_1037 2024-12-08T07:58:03,414 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK] 2024-12-08T07:58:03,416 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,417 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:03,417 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741855_1038 2024-12-08T07:58:03,417 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:03,418 WARN [IPC Server handler 1 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T07:58:03,418 WARN [IPC Server handler 1 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T07:58:03,418 WARN [IPC Server handler 1 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T07:58:03,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:03,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:03,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:03,421 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:03,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:03,421 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644679380 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644683404 2024-12-08T07:58:03,422 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40587:40587)] 2024-12-08T07:58:03,422 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time 2024-12-08T07:58:03,422 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644679380 is not closed yet, will try archiving it next time 2024-12-08T07:58:03,423 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644675370 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs/0106a245d0e8%2C43367%2C1733644661968.1733644675370 2024-12-08T07:58:03,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741841_1024 (size=25992) 2024-12-08T07:58:03,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43367 {}] regionserver.HRegion(8855): Flush requested on 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:58:03,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T07:58:03,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/d7e06a7653924d1ba6099269ffa9d30c is 1079, key is tmprow/info:/1733644683526/Put/seqid=0 2024-12-08T07:58:03,535 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,535 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 
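Annotation: the roll recorded just above ("HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is FSHLog's low-replication check: after syncs it compares the current output-stream pipeline length against the expected replication and requests a roll when it drops below the tolerable minimum, which is why a new WAL file appears even though writes are still succeeding on one datanode. A hedged sketch of the two configuration keys involved follows; the key names are the ones read by FSHLog in recent HBase releases and should be verified against the version under test, and the values shown are illustrative rather than this test's settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalLowReplicationConfigSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum pipeline replication the WAL tolerates before requesting a roll.
    // If unset, it defaults to the file system's configured replication.
    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    // How many consecutive rolls may be triggered by low replication alone
    // before the WAL stops rolling for that reason.
    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
    return conf;
  }
}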
2024-12-08T07:58:03,535 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741857_1040 2024-12-08T07:58:03,536 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:03,537 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,537 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad. 2024-12-08T07:58:03,537 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741858_1041 2024-12-08T07:58:03,538 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK] 2024-12-08T07:58:03,539 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:03,539 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:58:03,539 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741859_1042 2024-12-08T07:58:03,540 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:03,541 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,541 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 
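Annotation: the "All datanodes [...] are bad. Aborting..." IOExceptions from the WAL rollers come from a different path than the create-time exclusions: the stack trace runs through setupPipelineForAppendOrRecovery, meaning an already-open pipeline lost its last good datanode and no replacement could be found. The HDFS client's behaviour in that situation is shaped by the replace-datanode-on-failure settings. The sketch below lists the standard client keys; the keys are real HDFS client properties, but the values are only an illustration of how a deployment with very few datanodes might relax the policy, not what this test configures.

import org.apache.hadoop.conf.Configuration;

public class ReplaceDatanodeOnFailureSketch {
  public static Configuration sketch() {
    Configuration conf = new Configuration();
    // Whether the client tries to add a replacement datanode when a node in an
    // existing write pipeline fails.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT, ALWAYS or NEVER; NEVER is sometimes chosen on 2-3 node clusters
    // where no replacement datanode can exist anyway.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // With best-effort enabled the client keeps writing with the surviving nodes
    // instead of failing the stream when no replacement is found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}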
2024-12-08T07:58:03,541 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741860_1043 2024-12-08T07:58:03,542 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:58:03,543 WARN [IPC Server handler 0 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T07:58:03,543 WARN [IPC Server handler 0 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T07:58:03,543 WARN [IPC Server handler 0 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T07:58:03,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741861_1044 (size=6027) 2024-12-08T07:58:03,824 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time 2024-12-08T07:58:03,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/d7e06a7653924d1ba6099269ffa9d30c 2024-12-08T07:58:03,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/d7e06a7653924d1ba6099269ffa9d30c as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7e06a7653924d1ba6099269ffa9d30c 2024-12-08T07:58:03,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7e06a7653924d1ba6099269ffa9d30c, entries=1, sequenceid=34, filesize=5.9 K 2024-12-08T07:58:03,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 00c40378502d1ce97c64d133305e3b9a in 438ms, sequenceid=34, compaction requested=true 2024-12-08T07:58:03,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00c40378502d1ce97c64d133305e3b9a: 2024-12-08T07:58:03,965 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-08T07:58:03,965 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:03,966 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629 because midkey is the same as first or last row 2024-12-08T07:58:03,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00c40378502d1ce97c64d133305e3b9a:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T07:58:03,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T07:58:03,966 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T07:58:03,968 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T07:58:03,968 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HStore(1541): 00c40378502d1ce97c64d133305e3b9a/info is initiating minor compaction (all files) 2024-12-08T07:58:03,968 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 00c40378502d1ce97c64d133305e3b9a/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 
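Annotation: the compaction request above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") fires because the store has reached the minimum file count for a minor compaction, and 16 is the blocking store-file limit at which further writes would stall. A short sketch of the store-level keys behind those numbers follows; these are standard HBase properties, and the defaults quoted in the comments are from recent releases and worth re-checking against the version under test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfigSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // selected (the "3 eligible" above); default 3.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files merged in one minor compaction; default 10.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count at which writes to the region are blocked until
    // compaction catches up (the "16 blocking" above); default 16.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}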
2024-12-08T07:58:03,968 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/4cabce9aeb34416988361e0dd4372374, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7e06a7653924d1ba6099269ffa9d30c] into tmpdir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp, totalSize=28.2 K 2024-12-08T07:58:03,969 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4cabce9aeb34416988361e0dd4372374, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733644677412 2024-12-08T07:58:03,970 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62b2aff2a6f844c1b97f9c010be2f629, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733644681466 2024-12-08T07:58:03,970 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7e06a7653924d1ba6099269ffa9d30c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733644683526 2024-12-08T07:58:03,988 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00c40378502d1ce97c64d133305e3b9a#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T07:58:03,989 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/186e9f1908b74b41ae3f1061dfe187ea is 1080, key is row0002/info:/1733644677412/Put/seqid=0 2024-12-08T07:58:03,991 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:03,991 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad. 2024-12-08T07:58:03,991 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741862_1045 2024-12-08T07:58:03,992 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK] 2024-12-08T07:58:03,993 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,994 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 2024-12-08T07:58:03,994 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741863_1046 2024-12-08T07:58:03,994 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:58:03,997 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:03,997 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:58:03,997 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741864_1047 2024-12-08T07:58:03,998 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:03,999 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:04,000 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 
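Annotation: by this point the compaction writer has been through the same exclusion cycle as the flush and WAL writers (and it continues below), and every addStoredBlock line names only 127.0.0.1:34057. When diagnosing a run like this it can help to ask the NameNode which datanodes it still considers live; a minimal sketch using the DistributedFileSystem datanode-report API follows. The fs.defaultFS URI is taken from the paths in this log, the API names are believed to be the standard client calls but should be checked against the Hadoop version in use, and the printed fields are only an example of what the report carries.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveDatanodeReportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:37725"), conf);
    // The LIVE/DEAD classification lags real failures until heartbeats time out,
    // which is why the NameNode above still hands out pipelines containing dead nodes.
    for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
      System.out.println(dn.getXferAddr() + " remaining=" + dn.getRemaining());
    }
  }
}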
2024-12-08T07:58:04,000 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741865_1048 2024-12-08T07:58:04,000 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:04,001 WARN [IPC Server handler 1 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T07:58:04,001 WARN [IPC Server handler 1 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T07:58:04,001 WARN [IPC Server handler 1 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T07:58:04,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741866_1049 (size=17994) 2024-12-08T07:58:04,017 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/186e9f1908b74b41ae3f1061dfe187ea as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea 2024-12-08T07:58:04,026 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 00c40378502d1ce97c64d133305e3b9a/info of 00c40378502d1ce97c64d133305e3b9a into 186e9f1908b74b41ae3f1061dfe187ea(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
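Annotation: the split-policy lines that follow ("Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K" and "cannot split ... because midkey is the same as first or last row") are the post-compaction split check: IncreasingToUpperBoundRegionSplitPolicy derives its threshold from the number of regions of the table on this server, but the split is vetoed because the store file's midkey would not produce two non-empty daughters. The helper below sketches the threshold rule as implemented in recent HBase versions; it is illustrative rather than HBase code, and the formula and defaults should be confirmed against the version under test.

public final class SplitThresholdSketch {
  private SplitThresholdSketch() {}

  // Approximately: with N regions of the table on this region server, split when a
  // store grows past min(maxFileSize, initialSize * N^3), where initialSize defaults
  // to 2 * memstore flush size. With N = 1 ("regionsWithCommonTable=1") and the tiny
  // sizes this test configures, that yields the "sizeToCheck=16.0 K" seen in the log.
  static long sizeToCheck(long maxFileSize, long initialSize, int regionsWithCommonTable) {
    if (regionsWithCommonTable == 0 || regionsWithCommonTable > 100) {
      return maxFileSize;
    }
    long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
    return Math.min(maxFileSize, initialSize * cubed);
  }
}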
2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 00c40378502d1ce97c64d133305e3b9a: 2024-12-08T07:58:04,026 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a., storeName=00c40378502d1ce97c64d133305e3b9a/info, priority=13, startTime=1733644683966; duration=0sec 2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea because midkey is the same as first or last row 2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:04,026 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea because midkey is the same as first or last row 2024-12-08T07:58:04,027 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-08T07:58:04,027 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:04,027 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea because midkey is the same as first or last row 2024-12-08T07:58:04,027 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T07:58:04,027 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00c40378502d1ce97c64d133305e3b9a:info 2024-12-08T07:58:04,092 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43367 {}] regionserver.HRegion(8855): Flush requested on 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:58:04,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T07:58:04,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/f29d95f35c404ce3a543e184e2743c23 is 1079, key is tmprow/info:/1733644684948/Put/seqid=0 2024-12-08T07:58:04,994 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f113501[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741846_1029 to 127.0.0.1:42967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:04,994 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@48729010[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741851_1034 to 127.0.0.1:42091 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:05,341 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:05,423 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:05,423 WARN [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]] 2024-12-08T07:58:05,423 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C43367%2C1733644661968:(num 1733644683404) roll requested 2024-12-08T07:58:05,424 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.1733644685423 2024-12-08T07:58:05,427 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:05,427 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 2024-12-08T07:58:05,428 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741868_1051 2024-12-08T07:58:05,428 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:05,430 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:05,430 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:05,430 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741869_1052 2024-12-08T07:58:05,431 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:05,434 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46459 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:05,434 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44724 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741870_1053 to mirror 127.0.0.1:46459
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,434 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad.
2024-12-08T07:58:05,434 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741870_1053
2024-12-08T07:58:05,435 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44724 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-08T07:58:05,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44724 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44724 dst: /127.0.0.1:34057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,435 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]
2024-12-08T07:58:05,438 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42967
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:05,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44734 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741871_1054 to mirror 127.0.0.1:42967
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,438 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad.
2024-12-08T07:58:05,438 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741871_1054
2024-12-08T07:58:05,438 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44734 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-08T07:58:05,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44734 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44734 dst: /127.0.0.1:34057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,439 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]
2024-12-08T07:58:05,440 WARN [IPC Server handler 4 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-08T07:58:05,440 WARN [IPC Server handler 4 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-08T07:58:05,440 WARN [IPC Server handler 4 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-08T07:58:05,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:58:05,446 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:58:05,446 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:58:05,446 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:58:05,446 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:58:05,446 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644683404 with entries=14, filesize=12.92 KB; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644685423
2024-12-08T07:58:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741856_1039 (size=13234)
2024-12-08T07:58:05,453 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40587:40587)]
2024-12-08T07:58:05,453 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time
2024-12-08T07:58:05,453 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644683404 is not closed yet, will try archiving it next time
2024-12-08T07:58:05,453 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644679380 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs/0106a245d0e8%2C43367%2C1733644661968.1733644679380
2024-12-08T07:58:05,849 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 is not closed yet, will try archiving it next time
2024-12-08T07:58:05,965 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:05,965 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad.
2024-12-08T07:58:05,965 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741867_1050
2024-12-08T07:58:05,966 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]
2024-12-08T07:58:05,968 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42967
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:05,968 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44750 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741873_1056 to mirror 127.0.0.1:42967
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,969 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad.
2024-12-08T07:58:05,969 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44750 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-08T07:58:05,969 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741873_1056
2024-12-08T07:58:05,969 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44750 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44750 dst: /127.0.0.1:34057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,969 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]
2024-12-08T07:58:05,971 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:05,971 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad.
2024-12-08T07:58:05,971 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741874_1057
2024-12-08T07:58:05,972 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]
2024-12-08T07:58:05,974 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46459
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:05,974 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44756 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741875_1058 to mirror 127.0.0.1:46459
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,974 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad.
2024-12-08T07:58:05,975 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741875_1058
2024-12-08T07:58:05,975 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44756 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-08T07:58:05,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44756 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44756 dst: /127.0.0.1:34057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,975 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]
2024-12-08T07:58:05,976 WARN [IPC Server handler 2 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-08T07:58:05,976 WARN [IPC Server handler 2 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-08T07:58:05,976 WARN [IPC Server handler 2 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-08T07:58:05,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741876_1059 (size=6027)
2024-12-08T07:58:05,994 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f113501[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741861_1044 to 127.0.0.1:44111 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:05,994 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@48729010[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741841_1024 to 127.0.0.1:42967 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:06,093 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:06,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/f29d95f35c404ce3a543e184e2743c23
2024-12-08T07:58:06,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/f29d95f35c404ce3a543e184e2743c23 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/f29d95f35c404ce3a543e184e2743c23
2024-12-08T07:58:06,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/f29d95f35c404ce3a543e184e2743c23, entries=1, sequenceid=45, filesize=5.9 K
2024-12-08T07:58:06,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=7.35 KB/7525 for 00c40378502d1ce97c64d133305e3b9a in 1446ms, sequenceid=45, compaction requested=false
2024-12-08T07:58:06,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00c40378502d1ce97c64d133305e3b9a:
2024-12-08T07:58:06,397 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-12-08T07:58:06,397 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T07:58:06,397 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea because midkey is the same as first or last row
2024-12-08T07:58:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43367 {}] regionserver.HRegion(8855): Flush requested on 00c40378502d1ce97c64d133305e3b9a
2024-12-08T07:58:06,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=8.40 KB heapSize=9.25 KB
2024-12-08T07:58:06,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/d7d4af017b14401687489b23bf221e53 is 1079, key is tmprow/info:/1733644686573/Put/seqid=0
2024-12-08T07:58:06,583 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:06,584 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad.
2024-12-08T07:58:06,584 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741877_1060
2024-12-08T07:58:06,585 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]
2024-12-08T07:58:06,586 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:06,587 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad.
2024-12-08T07:58:06,587 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741878_1061
2024-12-08T07:58:06,587 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]
2024-12-08T07:58:06,589 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:06,589 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad.
2024-12-08T07:58:06,589 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741879_1062
2024-12-08T07:58:06,590 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]
2024-12-08T07:58:06,591 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:06,591 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad.
2024-12-08T07:58:06,591 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741880_1063
2024-12-08T07:58:06,592 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]
2024-12-08T07:58:06,593 WARN [IPC Server handler 1 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-08T07:58:06,593 WARN [IPC Server handler 1 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-08T07:58:06,593 WARN [IPC Server handler 1 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-08T07:58:06,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741881_1064 (size=6027)
2024-12-08T07:58:06,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.40 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/d7d4af017b14401687489b23bf221e53
2024-12-08T07:58:07,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/d7d4af017b14401687489b23bf221e53 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7d4af017b14401687489b23bf221e53
2024-12-08T07:58:07,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7d4af017b14401687489b23bf221e53, entries=1, sequenceid=56, filesize=5.9 K
2024-12-08T07:58:07,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.40 KB/8600, heapSize ~9.23 KB/9456, currentSize=2.10 KB/2150 for 00c40378502d1ce97c64d133305e3b9a in 436ms, sequenceid=56, compaction requested=true
2024-12-08T07:58:07,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00c40378502d1ce97c64d133305e3b9a:
2024-12-08T07:58:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K
2024-12-08T07:58:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T07:58:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea because midkey is the same as first or last row
2024-12-08T07:58:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00c40378502d1ce97c64d133305e3b9a:info, priority=-2147483648, current under compaction store size is 1
2024-12-08T07:58:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T07:58:07,011 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-08T07:58:07,012 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-08T07:58:07,012 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HStore(1541): 00c40378502d1ce97c64d133305e3b9a/info is initiating minor compaction (all files)
2024-12-08T07:58:07,012 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 00c40378502d1ce97c64d133305e3b9a/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.
2024-12-08T07:58:07,012 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/f29d95f35c404ce3a543e184e2743c23, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7d4af017b14401687489b23bf221e53] into tmpdir=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp, totalSize=29.3 K
2024-12-08T07:58:07,013 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.Compactor(225): Compacting 186e9f1908b74b41ae3f1061dfe187ea, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733644677412
2024-12-08T07:58:07,013 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.Compactor(225): Compacting f29d95f35c404ce3a543e184e2743c23, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733644684948
2024-12-08T07:58:07,014 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7d4af017b14401687489b23bf221e53, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733644686573
2024-12-08T07:58:07,029 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00c40378502d1ce97c64d133305e3b9a#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T07:58:07,030 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/0fda9a6416484f408685067ba4e7768f is 1080, key is row0002/info:/1733644677412/Put/seqid=0
2024-12-08T07:58:07,033 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42967
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:07,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44780 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741882_1065 to mirror 127.0.0.1:42967
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:07,033 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]) is bad.
2024-12-08T07:58:07,033 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44780 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-08T07:58:07,033 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741882_1065
2024-12-08T07:58:07,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:44780 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44780 dst: /127.0.0.1:34057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T07:58:07,034 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]
2024-12-08T07:58:07,035 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-08T07:58:07,035 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad.
2024-12-08T07:58:07,035 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741883_1066
2024-12-08T07:58:07,036 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]
2024-12-08T07:58:07,037 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:07,037 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:07,037 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741884_1067 2024-12-08T07:58:07,038 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:07,039 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:07,040 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK]) is bad. 
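The run of "Connection refused" / "datanode ... is bad" entries above is the HDFS client (DataStreamer) rebuilding its write pipeline after the test stopped several datanodes, abandoning each block and excluding the dead node before retrying. As a hedged illustration only (not taken from the test code), the sketch below shows the standard client-side settings that govern whether such a write keeps going on the surviving replicas; the filesystem URI mirrors the namenode port seen in this log, and the path, replication factor, and payload are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineFailureTolerantWrite {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep writing on the surviving datanodes instead of failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        // Placeholder URI and path; 37725 is the namenode port that appears in this log.
        try (FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:37725"), conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"), (short) 2)) {
          out.writeBytes("probe");
          out.hflush(); // pushes the data through whatever pipeline is currently healthy
        }
      }
    }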
2024-12-08T07:58:07,040 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741885_1068 2024-12-08T07:58:07,040 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42091,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK] 2024-12-08T07:58:07,041 WARN [IPC Server handler 3 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T07:58:07,041 WARN [IPC Server handler 3 on default port 37725 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T07:58:07,041 WARN [IPC Server handler 3 on default port 37725 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T07:58:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741886_1069 (size=18097) 2024-12-08T07:58:07,342 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:07,453 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/0fda9a6416484f408685067ba4e7768f as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/0fda9a6416484f408685067ba4e7768f 2024-12-08T07:58:07,453 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:07,454 WARN [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-08T07:58:07,461 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 00c40378502d1ce97c64d133305e3b9a/info of 00c40378502d1ce97c64d133305e3b9a into 0fda9a6416484f408685067ba4e7768f(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T07:58:07,461 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 00c40378502d1ce97c64d133305e3b9a: 2024-12-08T07:58:07,461 INFO [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a., storeName=00c40378502d1ce97c64d133305e3b9a/info, priority=13, startTime=1733644687011; duration=0sec 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/0fda9a6416484f408685067ba4e7768f because midkey is the same as first or last row 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/0fda9a6416484f408685067ba4e7768f because midkey is the same as first or last row 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): 
regionsWithCommonTable=1 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/0fda9a6416484f408685067ba4e7768f because midkey is the same as first or last row 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T07:58:07,462 DEBUG [RS:0;0106a245d0e8:43367-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00c40378502d1ce97c64d133305e3b9a:info 2024-12-08T07:58:07,600 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:07,604 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:07,604 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:07,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:07,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:07,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22c6c03b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:07,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54f27916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:07,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@575d7e2f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/java.io.tmpdir/jetty-localhost-45379-hadoop-hdfs-3_4_1-tests_jar-_-any-9455597596234555809/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:07,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39eaf0e6{HTTP/1.1, (http/1.1)}{localhost:45379} 2024-12-08T07:58:07,715 INFO [Time-limited test {}] server.Server(415): Started @135282ms 2024-12-08T07:58:07,716 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
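The "Failed to place enough replicas" warnings a few entries above explicitly ask for DEBUG logging on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology. A minimal sketch of enabling that programmatically with the Log4j2 API this run already uses; the logger names come straight from the warning, everything else is illustrative.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
      public static void main(String[] args) {
        // Raise verbosity on the two loggers named in the warning so the placement
        // decisions (and why candidate nodes were rejected) show up in the next run.
        Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
      }
    }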
2024-12-08T07:58:07,993 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f113501[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741856_1039 to 127.0.0.1:42967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:07,993 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@48729010[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741866_1049 to 127.0.0.1:42967 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:08,093 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:08,225 WARN [Thread-988 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T07:58:08,233 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a9b7c8f5dda0cb9 with lease ID 0x98c7d3aa439fb3b2: from storage DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8 node DatanodeRegistration(127.0.0.1:45087, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=34433, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:08,234 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a9b7c8f5dda0cb9 with lease ID 0x98c7d3aa439fb3b2: from storage DS-af6e152d-a5b0-4184-9035-a2aa7951d0a0 node DatanodeRegistration(127.0.0.1:45087, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=34433, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:08,994 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@48729010[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34057, datanodeUuid=1749c9bd-5e00-409e-94fc-ba33301b0ccc, infoPort=40587, infoSecurePort=0, ipcPort=42479, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741876_1059 to 127.0.0.1:44111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:08,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741881_1064 (size=6027) 2024-12-08T07:58:09,342 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
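The Jetty startup above and the block reports from 127.0.0.1:45087 correspond to a datanode being brought back after others were killed earlier in the run. A hedged sketch, assuming a MiniDFSCluster handle supplied by the test harness, of the stop/restart cycle that produces this pattern of "Connection refused" followed by fresh block reports; the datanode index and timing are illustrative, not the test's actual sequence.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeRestartSketch {
      // 'cluster' is assumed to come from the surrounding test utility, not created here.
      static void killAndRestartOneDatanode(MiniDFSCluster cluster) throws Exception {
        // Stop datanode 0; writes in flight see "Connection refused" and recover their pipelines.
        MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
        // ... exercise WAL rolls and flushes while the node is down ...
        cluster.restartDataNode(dnProps, true); // true reuses the old address; false lets it pick a new port
        cluster.waitActive();                   // wait until the restarted node has re-registered
      }
    }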
2024-12-08T07:58:09,454 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:10,094 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:10,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741886_1069 (size=18097) 2024-12-08T07:58:11,343 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:11,454 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:11,766 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T07:58:12,094 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:12,329 ERROR [FSHLog-0-hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData-prefix:0106a245d0e8,33265,1733644661788 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:12,329 WARN [FSHLog-0-hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData-prefix:0106a245d0e8,33265,1733644661788 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:12,329 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C33265%2C1733644661788:(num 1733644662110) roll requested 2024-12-08T07:58:12,330 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C33265%2C1733644661788.1733644692330 2024-12-08T07:58:12,333 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:12,334 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:12,334 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741887_1070 2024-12-08T07:58:12,334 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:12,337 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46459 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:12,337 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1732916473_22 at /127.0.0.1:43504 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741888_1071 to mirror 127.0.0.1:46459 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:12,337 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK]) is bad. 2024-12-08T07:58:12,337 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741888_1071 2024-12-08T07:58:12,338 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1732916473_22 at /127.0.0.1:43504 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T07:58:12,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1732916473_22 at /127.0.0.1:43504 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43504 dst: /127.0.0.1:34057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:58:12,338 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46459,DS-397f1186-2937-4e67-95f3-b674324c9d6c,DISK] 2024-12-08T07:58:12,348 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:12,348 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:12,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:12,348 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:12,348 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:12,349 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644692330 2024-12-08T07:58:12,349 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:12,349 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
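The "Rolled WAL ... MasterData/WALs/..." entry above shows the roller recovering from the failed pipeline by opening a new writer; the region server WAL is rolled the same way later in the log. Those rolls are driven automatically by the low-replication detector, but for reference a client can also request a region server WAL roll through the public Admin API. A hedged sketch; connection settings are placeholders and this is not part of the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualWalRoll {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (ServerName rs : admin.getRegionServers()) {
            // Ask each region server to close its current WAL and open a new one.
            admin.rollWALWriter(rs);
          }
        }
      }
    }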
2024-12-08T07:58:12,349 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 2024-12-08T07:58:12,349 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34433:34433),(127.0.0.1/127.0.0.1:40587:40587)] 2024-12-08T07:58:12,349 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 is not closed yet, will try archiving it next time 2024-12-08T07:58:12,350 WARN [IPC Server handler 4 on default port 37725 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-12-08T07:58:12,350 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 after 1ms 2024-12-08T07:58:13,343 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:13,455 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:15,344 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:15,455 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:16,352 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 after 4003ms 2024-12-08T07:58:17,344 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:17,455 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
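The "Failed to recover lease, attempt=1 ... after 4003ms" entry above is RecoverLeaseFSUtils polling the NameNode until the old master WAL file can be closed. A rough sketch of the underlying call, assuming the default filesystem is HDFS; the retry count and sleep below are illustrative, not the utility's actual schedule, and the path stands in for the WAL file named in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      static boolean recoverWithRetries(Configuration conf, Path oldWal) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        for (int attempt = 0; attempt < 5; attempt++) {
          if (dfs.recoverLease(oldWal)) {   // true once the file is closed and the lease released
            return true;
          }
          Thread.sleep(4000L);              // roughly the pacing visible in the log (attempt=0, attempt=1, ...)
        }
        return false;
      }
    }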
2024-12-08T07:58:18,248 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1d988be5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:42967,null,null]) java.net.ConnectException: Call From 0106a245d0e8/172.17.0.2 to localhost:35735 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T07:58:18,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741833_1019 (size=455) 2024-12-08T07:58:18,405 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644662603 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs/0106a245d0e8%2C43367%2C1733644661968.1733644662603 2024-12-08T07:58:18,407 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644683404 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs/0106a245d0e8%2C43367%2C1733644661968.1733644683404 2024-12-08T07:58:19,229 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2c4ff1fc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45087, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=34433, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741833_1019 to 127.0.0.1:46459 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:19,344 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:19,456 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,345 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,352 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.1733644701351 2024-12-08T07:58:21,356 WARN [Thread-1021 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,356 WARN [Thread-1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 
2024-12-08T07:58:21,356 WARN [Thread-1021 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741890_1074 2024-12-08T07:58:21,357 WARN [Thread-1021 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:21,365 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,365 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,365 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,365 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,365 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,365 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644685423 with entries=14, filesize=12.95 KB; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644701351 2024-12-08T07:58:21,366 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34433:34433),(127.0.0.1/127.0.0.1:40587:40587)] 2024-12-08T07:58:21,366 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644685423 is not closed yet, will try archiving it next time 2024-12-08T07:58:21,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741872_1055 (size=13268) 2024-12-08T07:58:21,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43367 {}] regionserver.HRegion(8855): Flush requested on 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:58:21,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T07:58:21,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/8f154b32aad74326b2ed82cc227d9b25 is 1080, key is row0013/info:/1733644701367/Put/seqid=0 2024-12-08T07:58:21,381 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
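The "Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families" entry above is a memstore flush triggered by the incoming writes. As a hedged aside only, an equivalent flush can be requested from a client against the same test table; the configuration and connection details below are assumed, not taken from the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush the memstores of the table exercised by this log.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
        }
      }
    }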
2024-12-08T07:58:21,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:41854 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6]'}, localName='127.0.0.1:34057', datanodeUuid='1749c9bd-5e00-409e-94fc-ba33301b0ccc', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741892_1076 to mirror 127.0.0.1:44111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:21,381 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:21,381 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:41854 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T07:58:21,381 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741892_1076 2024-12-08T07:58:21,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:41854 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:34057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41854 dst: /127.0.0.1:34057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:21,382 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:21,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741893_1077 (size=9267) 2024-12-08T07:58:21,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741893_1077 (size=9267) 2024-12-08T07:58:21,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=67 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/8f154b32aad74326b2ed82cc227d9b25 2024-12-08T07:58:21,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/8f154b32aad74326b2ed82cc227d9b25 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/8f154b32aad74326b2ed82cc227d9b25 2024-12-08T07:58:21,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/8f154b32aad74326b2ed82cc227d9b25, entries=4, sequenceid=67, filesize=9.0 K 2024-12-08T07:58:21,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 00c40378502d1ce97c64d133305e3b9a in 34ms, sequenceid=67, compaction requested=false 2024-12-08T07:58:21,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00c40378502d1ce97c64d133305e3b9a: 2024-12-08T07:58:21,407 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-12-08T07:58:21,407 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T07:58:21,407 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/0fda9a6416484f408685067ba4e7768f because midkey is the same as first or last row 2024-12-08T07:58:21,456 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,456 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-08T07:58:21,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T07:58:21,594 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T07:58:21,594 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:21,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:21,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:21,594 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T07:58:21,595 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T07:58:21,595 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=821688174, stopped=false 2024-12-08T07:58:21,595 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,33265,1733644661788 2024-12-08T07:58:21,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:21,660 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:21,660 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:21,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:21,660 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:58:21,661 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T07:58:21,661 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:21,661 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:21,661 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,43367,1733644661968' ***** 2024-12-08T07:58:21,661 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T07:58:21,661 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,42555,1733644663236' ***** 2024-12-08T07:58:21,661 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T07:58:21,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:21,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:21,661 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T07:58:21,662 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T07:58:21,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:21,662 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T07:58:21,662 INFO [RS:0;0106a245d0e8:43367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T07:58:21,662 INFO [RS:0;0106a245d0e8:43367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,42555,1733644663236 2024-12-08T07:58:21,662 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(3091): Received CLOSE for 00c40378502d1ce97c64d133305e3b9a 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:58:21,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0106a245d0e8:42555. 
2024-12-08T07:58:21,662 DEBUG [RS:1;0106a245d0e8:42555 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:21,662 DEBUG [RS:1;0106a245d0e8:42555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:21,662 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,43367,1733644661968 2024-12-08T07:58:21,662 INFO [RS:0;0106a245d0e8:43367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:58:21,662 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,42555,1733644663236; all regions closed. 2024-12-08T07:58:21,662 INFO [RS:0;0106a245d0e8:43367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:43367. 
2024-12-08T07:58:21,663 DEBUG [RS:0;0106a245d0e8:43367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:21,663 DEBUG [RS:0;0106a245d0e8:43367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:21,663 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 00c40378502d1ce97c64d133305e3b9a, disabling compactions & flushes 2024-12-08T07:58:21,663 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T07:58:21,663 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:58:21,663 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T07:58:21,663 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:58:21,663 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T07:58:21,663 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. after waiting 0 ms 2024-12-08T07:58:21,663 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T07:58:21,663 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 
2024-12-08T07:58:21,663 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:21,663 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 00c40378502d1ce97c64d133305e3b9a 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-08T07:58:21,663 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T07:58:21,663 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 00c40378502d1ce97c64d133305e3b9a=TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.} 2024-12-08T07:58:21,663 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,664 DEBUG [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1351): Waiting on 00c40378502d1ce97c64d133305e3b9a, 1588230740 2024-12-08T07:58:21,664 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,664 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,664 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:58:21,664 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,664 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:58:21,664 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:58:21,664 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:58:21,664 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,664 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:58:21,665 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-08T07:58:21,665 ERROR [FSHLog-0-hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621-prefix:0106a245d0e8,43367,1733644661968.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:21,665 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,665 WARN [FSHLog-0-hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621-prefix:0106a245d0e8,43367,1733644661968.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,665 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:21,665 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C43367%2C1733644661968.meta:.meta(num 1733644663048) roll requested 2024-12-08T07:58:21,665 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 2024-12-08T07:58:21,666 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C43367%2C1733644661968.meta.1733644701665.meta 2024-12-08T07:58:21,666 WARN [IPC Server handler 0 on default port 37725 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 has not been closed. Lease recovery is in progress. RecoveryId = 1078 for block blk_1073741837_1013 2024-12-08T07:58:21,666 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 after 1ms 2024-12-08T07:58:21,668 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,669 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 
2024-12-08T07:58:21,669 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741894_1079 2024-12-08T07:58:21,669 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:21,670 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/8ee66ecc5063414cabdf2ce3e5b121b5 is 1080, key is row0016/info:/1733644701374/Put/seqid=0 2024-12-08T07:58:21,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741896_1081 (size=13583) 2024-12-08T07:58:21,690 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741896_1081 (size=13583) 2024-12-08T07:58:21,691 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,691 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,691 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,691 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,691 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644701665.meta 2024-12-08T07:58:21,691 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/8ee66ecc5063414cabdf2ce3e5b121b5 2024-12-08T07:58:21,691 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,692 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42967,DS-e35af792-8aa3-4f36-a955-ecf3072f805a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,692 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta 2024-12-08T07:58:21,692 WARN [IPC Server handler 2 on default port 37725 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-12-08T07:58:21,692 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta after 0ms 2024-12-08T07:58:21,697 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40587:40587),(127.0.0.1/127.0.0.1:34433:34433)] 2024-12-08T07:58:21,697 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta is not closed yet, will try archiving it next time 2024-12-08T07:58:21,699 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/.tmp/info/8ee66ecc5063414cabdf2ce3e5b121b5 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/8ee66ecc5063414cabdf2ce3e5b121b5 2024-12-08T07:58:21,706 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/8ee66ecc5063414cabdf2ce3e5b121b5, entries=8, sequenceid=78, filesize=13.3 K 2024-12-08T07:58:21,707 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 00c40378502d1ce97c64d133305e3b9a in 44ms, sequenceid=78, compaction requested=true 2024-12-08T07:58:21,714 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/info/3119b6afda45400180064e7db6486c64 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a./info:regioninfo/1733644663740/Put/seqid=0 2024-12-08T07:58:21,716 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,716 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/4cabce9aeb34416988361e0dd4372374, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7e06a7653924d1ba6099269ffa9d30c, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/f29d95f35c404ce3a543e184e2743c23, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7d4af017b14401687489b23bf221e53] to archive 2024-12-08T07:58:21,716 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 
2024-12-08T07:58:21,716 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741897_1083 2024-12-08T07:58:21,717 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:21,717 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T07:58:21,720 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/4cabce9aeb34416988361e0dd4372374 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/4cabce9aeb34416988361e0dd4372374 2024-12-08T07:58:21,722 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/62b2aff2a6f844c1b97f9c010be2f629 2024-12-08T07:58:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741898_1084 (size=7089) 2024-12-08T07:58:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741898_1084 (size=7089) 2024-12-08T07:58:21,725 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/info/3119b6afda45400180064e7db6486c64 2024-12-08T07:58:21,726 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/186e9f1908b74b41ae3f1061dfe187ea 2024-12-08T07:58:21,727 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7e06a7653924d1ba6099269ffa9d30c to 
hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7e06a7653924d1ba6099269ffa9d30c 2024-12-08T07:58:21,729 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/f29d95f35c404ce3a543e184e2743c23 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/f29d95f35c404ce3a543e184e2743c23 2024-12-08T07:58:21,730 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7d4af017b14401687489b23bf221e53 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/info/d7d4af017b14401687489b23bf221e53 2024-12-08T07:58:21,731 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0106a245d0e8:33265 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-08T07:58:21,731 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4cabce9aeb34416988361e0dd4372374=10347, 62b2aff2a6f844c1b97f9c010be2f629=12506, 186e9f1908b74b41ae3f1061dfe187ea=17994, d7e06a7653924d1ba6099269ffa9d30c=6027, f29d95f35c404ce3a543e184e2743c23=6027, d7d4af017b14401687489b23bf221e53=6027] 2024-12-08T07:58:21,736 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00c40378502d1ce97c64d133305e3b9a/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-12-08T07:58:21,737 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:58:21,737 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 00c40378502d1ce97c64d133305e3b9a: Waiting for close lock at 1733644701662Running coprocessor pre-close hooks at 1733644701662Disabling compacts and flushes for region at 1733644701662Disabling writes for close at 1733644701663 (+1 ms)Obtaining lock to block concurrent updates at 1733644701663Preparing flush snapshotting stores in 00c40378502d1ce97c64d133305e3b9a at 1733644701663Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1733644701664 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. at 1733644701665 (+1 ms)Flushing 00c40378502d1ce97c64d133305e3b9a/info: creating writer at 1733644701665Flushing 00c40378502d1ce97c64d133305e3b9a/info: appending metadata at 1733644701669 (+4 ms)Flushing 00c40378502d1ce97c64d133305e3b9a/info: closing flushed file at 1733644701669Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1516b0ce: reopening flushed file at 1733644701698 (+29 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 00c40378502d1ce97c64d133305e3b9a in 44ms, sequenceid=78, compaction requested=true at 1733644701707 (+9 ms)Writing region close event to WAL at 1733644701732 (+25 ms)Running coprocessor post-close hooks at 1733644701736 (+4 ms)Closed at 1733644701736 2024-12-08T07:58:21,737 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733644663366.00c40378502d1ce97c64d133305e3b9a. 2024-12-08T07:58:21,746 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/ns/8a7debbebae94a7f8619207a181c2c9c is 43, key is default/ns:d/1733644663128/Put/seqid=0 2024-12-08T07:58:21,748 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,748 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK], DatanodeInfoWithStorage[127.0.0.1:34057,DS-c578cac0-fb58-4498-8432-04367948e478,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:21,748 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741899_1085 2024-12-08T07:58:21,749 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:21,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741900_1086 (size=5153) 2024-12-08T07:58:21,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741900_1086 (size=5153) 2024-12-08T07:58:21,753 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/ns/8a7debbebae94a7f8619207a181c2c9c 2024-12-08T07:58:21,767 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.1733644685423 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs/0106a245d0e8%2C43367%2C1733644661968.1733644685423 2024-12-08T07:58:21,782 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/table/5854e81a54184833ab085a1d4daaa977 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733644663750/Put/seqid=0 2024-12-08T07:58:21,785 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:21,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:41862 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data4]'}, localName='127.0.0.1:45087', datanodeUuid='680b3943-ef0b-4b4c-af1f-96c7df4955d5', xmitsInProgress=0}:Exception transferring block BP-997826538-172.17.0.2-1733644659391:blk_1073741901_1087 to mirror 127.0.0.1:44111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:21,785 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-997826538-172.17.0.2-1733644659391:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45087,DS-cb8f59dd-f26e-499c-b680-fa511fb15ff8,DISK], DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK]) is bad. 2024-12-08T07:58:21,785 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-997826538-172.17.0.2-1733644659391:blk_1073741901_1087 2024-12-08T07:58:21,785 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:41862 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-08T07:58:21,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1992179045_22 at /127.0.0.1:41862 [Receiving block BP-997826538-172.17.0.2-1733644659391:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:45087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41862 dst: /127.0.0.1:45087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:21,786 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44111,DS-4310500d-8ce5-4c64-9946-9148c0439e10,DISK] 2024-12-08T07:58:21,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741902_1088 (size=5424) 2024-12-08T07:58:21,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741902_1088 (size=5424) 2024-12-08T07:58:21,790 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/table/5854e81a54184833ab085a1d4daaa977 2024-12-08T07:58:21,797 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/info/3119b6afda45400180064e7db6486c64 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/info/3119b6afda45400180064e7db6486c64 2024-12-08T07:58:21,804 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/info/3119b6afda45400180064e7db6486c64, entries=10, sequenceid=11, filesize=6.9 K 2024-12-08T07:58:21,805 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/ns/8a7debbebae94a7f8619207a181c2c9c as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/ns/8a7debbebae94a7f8619207a181c2c9c 2024-12-08T07:58:21,813 INFO 
[RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/ns/8a7debbebae94a7f8619207a181c2c9c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T07:58:21,814 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/.tmp/table/5854e81a54184833ab085a1d4daaa977 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/table/5854e81a54184833ab085a1d4daaa977 2024-12-08T07:58:21,821 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/table/5854e81a54184833ab085a1d4daaa977, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T07:58:21,823 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 158ms, sequenceid=11, compaction requested=false 2024-12-08T07:58:21,827 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T07:58:21,828 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:58:21,828 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:58:21,828 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644701664Running coprocessor pre-close hooks at 1733644701664Disabling compacts and flushes for region at 1733644701664Disabling writes for close at 1733644701664Obtaining lock to block concurrent updates at 1733644701665 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733644701665Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733644701665Flushing stores of hbase:meta,,1.1588230740 at 1733644701697 (+32 ms)Flushing 1588230740/info: creating writer at 1733644701698 (+1 ms)Flushing 1588230740/info: appending metadata at 1733644701714 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733644701714Flushing 1588230740/ns: creating writer at 1733644701730 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733644701746 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733644701746Flushing 1588230740/table: creating writer at 1733644701760 (+14 ms)Flushing 1588230740/table: appending metadata at 1733644701782 (+22 ms)Flushing 1588230740/table: closing flushed file at 1733644701782Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3719247f: reopening flushed file at 1733644701796 (+14 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@263b320d: reopening flushed file at 1733644701804 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bcc7ebe: reopening flushed file at 1733644701813 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 158ms, sequenceid=11, compaction requested=false at 1733644701823 (+10 ms)Writing region close event to WAL at 1733644701824 (+1 ms)Running coprocessor post-close hooks at 1733644701828 (+4 ms)Closed at 1733644701828 2024-12-08T07:58:21,828 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T07:58:21,864 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,43367,1733644661968; all regions closed. 2024-12-08T07:58:21,864 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,864 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,865 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,865 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,865 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741895_1080 (size=825) 2024-12-08T07:58:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741895_1080 (size=825) 2024-12-08T07:58:22,369 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T07:58:22,369 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T07:58:22,474 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:58:22,475 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T07:58:22,475 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T07:58:23,148 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T07:58:23,148 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-08T07:58:23,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741835_1011 (size=393) 2024-12-08T07:58:23,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:58:23,341 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:58:24,230 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2c4ff1fc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45087, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=34433, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741827_1003 to 127.0.0.1:44111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:24,230 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7db774aa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45087, datanodeUuid=680b3943-ef0b-4b4c-af1f-96c7df4955d5, infoPort=34433, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=466817069;c=1733644659391):Failed to transfer BP-997826538-172.17.0.2-1733644659391:blk_1073741829_1005 to 127.0.0.1:44111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:58:25,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:58:25,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:58:25,667 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 after 4002ms 2024-12-08T07:58:25,693 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta after 4001ms 2024-12-08T07:58:25,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741872_1055 (size=13268) 2024-12-08T07:58:26,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:58:26,665 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-08T07:58:26,667 DEBUG [RS:1;0106a245d0e8:42555 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs 2024-12-08T07:58:26,667 INFO [RS:1;0106a245d0e8:42555 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C42555%2C1733644663236:(num 1733644663469) 2024-12-08T07:58:26,667 DEBUG [RS:1;0106a245d0e8:42555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:26,667 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:58:26,667 INFO [RS:1;0106a245d0e8:42555 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:58:26,668 INFO [RS:1;0106a245d0e8:42555 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T07:58:26,668 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T07:58:26,668 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T07:58:26,668 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T07:58:26,668 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T07:58:26,668 INFO [RS:1;0106a245d0e8:42555 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:58:26,668 INFO [RS:1;0106a245d0e8:42555 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42555 2024-12-08T07:58:26,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:58:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:58:26,734 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,42555,1733644663236 2024-12-08T07:58:26,734 INFO [RS:1;0106a245d0e8:42555 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:58:26,734 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,42555,1733644663236] 2024-12-08T07:58:26,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,755 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,42555,1733644663236 already deleted, retry=false 2024-12-08T07:58:26,755 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,42555,1733644663236 expired; onlineServers=1 2024-12-08T07:58:26,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:26,844 INFO [RS:1;0106a245d0e8:42555 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:58:26,844 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:58:26,844 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42555-0x100046e3ad00002, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:58:26,844 INFO [RS:1;0106a245d0e8:42555 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,42555,1733644663236; zookeeper connection closed. 
2024-12-08T07:58:26,845 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69b71030 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69b71030 2024-12-08T07:58:26,865 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-08T07:58:26,869 DEBUG [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs 2024-12-08T07:58:26,869 INFO [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C43367%2C1733644661968.meta:.meta(num 1733644701665) 2024-12-08T07:58:26,869 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:26,869 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:26,869 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:26,869 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:26,869 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:26,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741891_1075 (size=14682) 2024-12-08T07:58:26,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741891_1075 (size=14682) 2024-12-08T07:58:27,275 DEBUG [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs 2024-12-08T07:58:27,275 INFO [RS:0;0106a245d0e8:43367 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C43367%2C1733644661968:(num 1733644701351) 2024-12-08T07:58:27,275 DEBUG [RS:0;0106a245d0e8:43367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:27,275 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:58:27,275 INFO [RS:0;0106a245d0e8:43367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:58:27,276 INFO [RS:0;0106a245d0e8:43367 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T07:58:27,276 INFO [RS:0;0106a245d0e8:43367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:58:27,276 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T07:58:27,276 INFO [RS:0;0106a245d0e8:43367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43367 2024-12-08T07:58:27,279 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T07:58:27,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:27,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:58:27,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,43367,1733644661968 2024-12-08T07:58:27,323 INFO [RS:0;0106a245d0e8:43367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:58:27,324 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,43367,1733644661968] 2024-12-08T07:58:27,344 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,43367,1733644661968 already deleted, retry=false 2024-12-08T07:58:27,344 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,43367,1733644661968 expired; onlineServers=0 2024-12-08T07:58:27,344 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,33265,1733644661788' ***** 2024-12-08T07:58:27,344 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T07:58:27,344 INFO [M:0;0106a245d0e8:33265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:58:27,344 INFO [M:0;0106a245d0e8:33265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:58:27,344 DEBUG [M:0;0106a245d0e8:33265 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T07:58:27,345 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T07:58:27,345 DEBUG [M:0;0106a245d0e8:33265 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T07:58:27,345 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644662330 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644662330,5,FailOnTimeoutGroup] 2024-12-08T07:58:27,345 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644662330 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644662330,5,FailOnTimeoutGroup] 2024-12-08T07:58:27,345 INFO [M:0;0106a245d0e8:33265 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T07:58:27,345 INFO [M:0;0106a245d0e8:33265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:58:27,345 DEBUG [M:0;0106a245d0e8:33265 {}] master.HMaster(1795): Stopping service threads 2024-12-08T07:58:27,345 INFO [M:0;0106a245d0e8:33265 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T07:58:27,345 INFO [M:0;0106a245d0e8:33265 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:58:27,345 INFO [M:0;0106a245d0e8:33265 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T07:58:27,345 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T07:58:27,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T07:58:27,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:27,355 DEBUG [M:0;0106a245d0e8:33265 {}] zookeeper.ZKUtil(347): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T07:58:27,355 WARN [M:0;0106a245d0e8:33265 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T07:58:27,355 INFO [M:0;0106a245d0e8:33265 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/.lastflushedseqids 2024-12-08T07:58:27,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741903_1089 (size=130) 2024-12-08T07:58:27,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741903_1089 (size=130) 2024-12-08T07:58:27,362 INFO [M:0;0106a245d0e8:33265 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T07:58:27,362 INFO [M:0;0106a245d0e8:33265 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T07:58:27,362 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:58:27,362 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:58:27,362 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:58:27,362 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:58:27,362 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:58:27,362 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-08T07:58:27,382 DEBUG [M:0;0106a245d0e8:33265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4429e0431f1149e4aaedb6ae03d7ff37 is 82, key is hbase:meta,,1/info:regioninfo/1733644663077/Put/seqid=0 2024-12-08T07:58:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741904_1090 (size=5672) 2024-12-08T07:58:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741904_1090 (size=5672) 2024-12-08T07:58:27,388 INFO [M:0;0106a245d0e8:33265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4429e0431f1149e4aaedb6ae03d7ff37 2024-12-08T07:58:27,415 DEBUG [M:0;0106a245d0e8:33265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a8062feca3c34d7f83b969158f43ca3d is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733644663754/Put/seqid=0 2024-12-08T07:58:27,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741905_1091 (size=6255) 2024-12-08T07:58:27,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741905_1091 (size=6255) 2024-12-08T07:58:27,421 INFO [M:0;0106a245d0e8:33265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a8062feca3c34d7f83b969158f43ca3d 2024-12-08T07:58:27,427 INFO [M:0;0106a245d0e8:33265 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a8062feca3c34d7f83b969158f43ca3d 2024-12-08T07:58:27,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:58:27,434 INFO 
[RS:0;0106a245d0e8:43367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:58:27,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43367-0x100046e3ad00001, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:58:27,434 INFO [RS:0;0106a245d0e8:43367 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,43367,1733644661968; zookeeper connection closed. 2024-12-08T07:58:27,434 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6be94a43 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6be94a43 2024-12-08T07:58:27,434 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-08T07:58:27,442 DEBUG [M:0;0106a245d0e8:33265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/82be8fb09179475f8fe9b4dec868937b is 69, key is 0106a245d0e8,42555,1733644663236/rs:state/1733644663313/Put/seqid=0 2024-12-08T07:58:27,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741906_1092 (size=5224) 2024-12-08T07:58:27,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741906_1092 (size=5224) 2024-12-08T07:58:27,448 INFO [M:0;0106a245d0e8:33265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/82be8fb09179475f8fe9b4dec868937b 2024-12-08T07:58:27,468 DEBUG [M:0;0106a245d0e8:33265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7850e2c82f614761b3918e9240938d63 is 52, key is load_balancer_on/state:d/1733644663212/Put/seqid=0 2024-12-08T07:58:27,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741907_1093 (size=5056) 2024-12-08T07:58:27,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741907_1093 (size=5056) 2024-12-08T07:58:27,473 INFO [M:0;0106a245d0e8:33265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7850e2c82f614761b3918e9240938d63 2024-12-08T07:58:27,480 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4429e0431f1149e4aaedb6ae03d7ff37 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4429e0431f1149e4aaedb6ae03d7ff37 2024-12-08T07:58:27,485 INFO [M:0;0106a245d0e8:33265 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4429e0431f1149e4aaedb6ae03d7ff37, entries=8, sequenceid=60, filesize=5.5 K 2024-12-08T07:58:27,486 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a8062feca3c34d7f83b969158f43ca3d as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a8062feca3c34d7f83b969158f43ca3d 2024-12-08T07:58:27,493 INFO [M:0;0106a245d0e8:33265 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a8062feca3c34d7f83b969158f43ca3d 2024-12-08T07:58:27,493 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a8062feca3c34d7f83b969158f43ca3d, entries=6, sequenceid=60, filesize=6.1 K 2024-12-08T07:58:27,494 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/82be8fb09179475f8fe9b4dec868937b as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/82be8fb09179475f8fe9b4dec868937b 2024-12-08T07:58:27,500 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/82be8fb09179475f8fe9b4dec868937b, entries=2, sequenceid=60, filesize=5.1 K 2024-12-08T07:58:27,501 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7850e2c82f614761b3918e9240938d63 as hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7850e2c82f614761b3918e9240938d63 2024-12-08T07:58:27,508 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7850e2c82f614761b3918e9240938d63, entries=1, sequenceid=60, filesize=4.9 K 2024-12-08T07:58:27,510 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=60, compaction requested=false 2024-12-08T07:58:27,513 INFO [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:58:27,514 DEBUG [M:0;0106a245d0e8:33265 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644707362Disabling compacts and flushes for region at 1733644707362Disabling writes for close at 1733644707362Obtaining lock to block concurrent updates at 1733644707362Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644707362Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733644707363 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733644707363Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644707363Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644707381 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644707381Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644707399 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644707415 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644707415Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644707427 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644707442 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644707442Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644707453 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644707468 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644707468Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59e56cac: reopening flushed file at 1733644707479 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@263ea73a: reopening flushed file at 1733644707485 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79f52124: reopening flushed file at 1733644707493 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14e987cc: reopening flushed file at 1733644707500 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=60, compaction requested=false at 1733644707510 (+10 ms)Writing region close event to WAL at 1733644707513 (+3 ms)Closed at 1733644707513 2024-12-08T07:58:27,514 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:27,514 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:27,514 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:27,514 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:27,514 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:27,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741889_1072 (size=1045) 2024-12-08T07:58:27,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45087 is added to blk_1073741889_1072 (size=1045) 2024-12-08T07:58:27,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:27,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:28,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T07:58:28,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:58:28,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T07:58:28,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T07:58:28,251 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5c1430d1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-997826538-172.17.0.2-1733644659391:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:42967,null,null]) java.net.ConnectException: Call From 0106a245d0e8/172.17.0.2 to localhost:35735 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T07:58:28,361 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/WALs/0106a245d0e8,33265,1733644661788/0106a245d0e8%2C33265%2C1733644661788.1733644662110 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/oldWALs/0106a245d0e8%2C33265%2C1733644661788.1733644662110 2024-12-08T07:58:28,364 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/MasterData/oldWALs/0106a245d0e8%2C33265%2C1733644661788.1733644662110 to hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/oldWALs/0106a245d0e8%2C33265%2C1733644661788.1733644662110$masterlocalwal$ 2024-12-08T07:58:28,364 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T07:58:28,364 INFO [M:0;0106a245d0e8:33265 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
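Editor's note: the two "Failed invocation ... java.lang.reflect.InvocationTargetException: null" warnings above look alarming, but the "null" is only the wrapper exception's (empty) message; the real failure is the nested "Caused by: java.io.IOException: Filesystem closed", which the stack trace shows RecoverLeaseFSUtils hitting because it probes isFileClosed reflectively after the test's DFS client has already been shut down. Below is a small, self-contained illustration of that wrapping behaviour using plain JDK reflection; the Probe class is a made-up stand-in, not HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrappingDemo {
    // Stand-in for the reflective isFileClosed probe: the target method fails
    // the same way DFSClient does once the filesystem has been closed.
    public static class Probe {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = Probe.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new Probe(), "hdfs://example/path");
        } catch (InvocationTargetException e) {
            // Reflection wraps the checked exception; the wrapper's own message is null,
            // which is why the log renders "InvocationTargetException: null".
            System.out.println("wrapper message: " + e.getMessage());
            System.out.println("real cause: " + e.getCause());
        }
    }
}

In other words, the thing to read from these warnings is the shutdown ordering (the WAL writer was still recovering its lease when the client was closed), not the reflection machinery itself.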
2024-12-08T07:58:28,364 INFO [M:0;0106a245d0e8:33265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33265 2024-12-08T07:58:28,365 INFO [M:0;0106a245d0e8:33265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:58:28,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:58:28,486 INFO [M:0;0106a245d0e8:33265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:58:28,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33265-0x100046e3ad00000, quorum=127.0.0.1:49932, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:58:28,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@575d7e2f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:28,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39eaf0e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:28,489 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:28,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54f27916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:28,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22c6c03b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:28,490 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T07:58:28,490 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:28,490 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:28,490 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997826538-172.17.0.2-1733644659391 (Datanode Uuid 680b3943-ef0b-4b4c-af1f-96c7df4955d5) service to localhost/127.0.0.1:37725 2024-12-08T07:58:28,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24291469 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:42967,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35735 , LocalHost:localPort 0106a245d0e8/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T07:58:28,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24291469 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-997826538-172.17.0.2-1733644659391:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45087,null,null], DatanodeInfoWithStorage[127.0.0.1:42967,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-997826538-172.17.0.2-1733644659391 2024-12-08T07:58:28,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24291469 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45087,null,null]) java.io.IOException: No block pool offer service for bpid=BP-997826538-172.17.0.2-1733644659391 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:28,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24291469 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42967,null,null]) java.io.IOException: No block pool offer service for bpid=BP-997826538-172.17.0.2-1733644659391 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
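Editor's note: the InterruptedIOException above is the fixed-sleep retry policy being cut short: the recovery worker is sleeping between connection retries (delayMillis=1000, maxRetries=10 per the RetryAction text) when shutdown interrupts the thread, so the sleep's InterruptedException surfaces as an InterruptedIOException and the block recovery is abandoned. The loop below is a rough, dependency-free sketch of that retry/interrupt interplay; the RemoteCall interface and the demo timings are illustrative, not Hadoop's actual Client internals.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.ConnectException;

public class FixedSleepRetryDemo {
    /** Minimal stand-in for an RPC attempt that may fail with a ConnectException. */
    interface RemoteCall { void attempt() throws IOException; }

    /** Retry up to maxRetries times, sleeping a fixed delay between attempts. */
    static void callWithRetries(RemoteCall call, int maxRetries, long sleepMillis)
            throws IOException {
        for (int tried = 0; ; tried++) {
            try {
                call.attempt();
                return;                          // success
            } catch (ConnectException e) {
                if (tried >= maxRetries) {
                    throw e;                     // retries exhausted
                }
                try {
                    Thread.sleep(sleepMillis);   // fixed back-off between retries
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    // Mirror the behaviour seen in the log: an interrupt during the
                    // back-off sleep is reported as an InterruptedIOException.
                    InterruptedIOException iioe = new InterruptedIOException(
                        "Interrupted while retrying (attempt " + (tried + 1) + " of " + maxRetries + ")");
                    iioe.initCause(ie);
                    throw iioe;
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                callWithRetries(() -> { throw new ConnectException("Connection refused"); }, 10, 1000);
            } catch (IOException e) {
                System.out.println("recovery abandoned: " + e);
            }
        });
        worker.start();
        Thread.sleep(1500);   // let a couple of attempts happen
        worker.interrupt();   // simulate shutdown interrupting the recovery worker
        worker.join();
    }
}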
2024-12-08T07:58:28,491 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24291469 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45087,null,null], DatanodeInfoWithStorage[127.0.0.1:42967,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-997826538-172.17.0.2-1733644659391:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45087,null,null], DatanodeInfoWithStorage[127.0.0.1:42967,null,null]] 2024-12-08T07:58:28,491 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data3/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:28,491 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data4/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:28,491 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:28,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@304af6f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:28,494 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41559526{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:28,494 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:28,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65349436{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:28,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c68f920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:28,495 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:28,495 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
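Editor's note: the repeated "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings are the expected face of datanode shutdown here: the disk-usage refresh threads spend most of their life sleeping, and tearing down the mini-cluster interrupts them mid-sleep. The sketch below shows the general pattern of a periodic refresh thread that treats interruption as its stop signal; it is a generic illustration of that pattern, not the Hadoop CachingGetSpaceUsed implementation.

public class DiskUsageRefresher implements Runnable {
    private final long refreshIntervalMillis;
    private volatile long cachedUsedBytes;

    public DiskUsageRefresher(long refreshIntervalMillis) {
        this.refreshIntervalMillis = refreshIntervalMillis;
    }

    public long getCachedUsedBytes() {
        return cachedUsedBytes;
    }

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            cachedUsedBytes = computeUsedBytes();    // expensive scan, done off the hot path
            try {
                Thread.sleep(refreshIntervalMillis); // wait until the next refresh
            } catch (InterruptedException e) {
                // Interrupt means "shut down": note it once and exit the loop, which is
                // what the WARN records above correspond to.
                System.out.println("refresh thread interrupted, stopping");
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

    private long computeUsedBytes() {
        // Placeholder for a real directory walk (e.g. summing file sizes under a data dir).
        return System.nanoTime() % 1_000_000;
    }

    public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(new DiskUsageRefresher(1_000), "refreshUsed-demo");
        t.start();
        Thread.sleep(2_500);
        t.interrupt();   // mini-cluster-style shutdown: interrupt and join the refresher
        t.join();
    }
}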
2024-12-08T07:58:28,496 WARN [BP-997826538-172.17.0.2-1733644659391 heartbeating to localhost/127.0.0.1:37725 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997826538-172.17.0.2-1733644659391 (Datanode Uuid 1749c9bd-5e00-409e-94fc-ba33301b0ccc) service to localhost/127.0.0.1:37725 2024-12-08T07:58:28,496 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:28,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data5/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:28,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/cluster_b5cab470-d039-a8ef-7360-c4a9fc729c8c/data/data6/current/BP-997826538-172.17.0.2-1733644659391 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:28,497 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:28,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c461833{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:58:28,502 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33e53d1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:28,502 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:28,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab5393f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:28,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@692b8c40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:28,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T07:58:28,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T07:58:28,559 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36351 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36351 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37725 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37725 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37725 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37725 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37725 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f1e04bf5b70.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37725 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37725 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37725 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37725 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f1e04bf5b70.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37725 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37725 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=191 (was 181) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8441 (was 9365) 2024-12-08T07:58:28,566 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=191, ProcessCount=11, AvailableMemoryMB=8441 2024-12-08T07:58:28,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T07:58:28,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.log.dir so I do NOT create it in target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a 2024-12-08T07:58:28,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91a44b19-97c8-4f9a-c5d1-a0261e4f57a6/hadoop.tmp.dir so I do NOT create it in target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a 2024-12-08T07:58:28,566 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243, deleteOnExit=true 2024-12-08T07:58:28,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/test.cache.data in system properties and HBase conf 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir in system properties and HBase conf 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T07:58:28,567 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:58:28,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/nfs.dump.dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:58:28,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T07:58:28,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T07:58:28,582 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:58:28,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:28,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:28,953 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:28,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:28,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:28,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:28,973 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:28,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:28,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37ba1ac4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:28,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42369482{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:29,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@482b646b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-40265-hadoop-hdfs-3_4_1-tests_jar-_-any-9897992202772602317/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:58:29,074 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c7a3196{HTTP/1.1, (http/1.1)}{localhost:40265} 2024-12-08T07:58:29,074 INFO [Time-limited test {}] server.Server(415): Started @156642ms 2024-12-08T07:58:29,086 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:58:29,324 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:29,328 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:29,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:29,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:29,329 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:29,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34c1099f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:29,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d6935b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:29,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a16b3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-35605-hadoop-hdfs-3_4_1-tests_jar-_-any-7660045616392433310/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:29,433 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27ffc774{HTTP/1.1, (http/1.1)}{localhost:35605} 2024-12-08T07:58:29,433 INFO [Time-limited test {}] server.Server(415): Started @157001ms 2024-12-08T07:58:29,435 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:58:29,462 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:29,465 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:29,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:29,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:29,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:29,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1118a265{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:29,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b91ed3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:29,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27a17641{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-42263-hadoop-hdfs-3_4_1-tests_jar-_-any-13413849118977238026/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:29,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62cd82cb{HTTP/1.1, (http/1.1)}{localhost:42263} 2024-12-08T07:58:29,572 INFO [Time-limited test {}] server.Server(415): Started @157139ms 2024-12-08T07:58:29,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:58:29,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:29,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:58:30,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:30,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:30,726 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data1/current/BP-610407669-172.17.0.2-1733644708587/current, will proceed with Du for space computation calculation, 2024-12-08T07:58:30,726 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data2/current/BP-610407669-172.17.0.2-1733644708587/current, will proceed with Du for space computation calculation, 2024-12-08T07:58:30,745 WARN [Thread-1164 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:58:30,747 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a98cf73f2393fd3 with lease ID 0x8383db8749104429: Processing first storage report for DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49 from datanode DatanodeRegistration(127.0.0.1:34575, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=45039, infoSecurePort=0, ipcPort=44801, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587) 2024-12-08T07:58:30,747 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a98cf73f2393fd3 with lease ID 0x8383db8749104429: from storage DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49 node DatanodeRegistration(127.0.0.1:34575, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=45039, infoSecurePort=0, ipcPort=44801, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:30,747 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a98cf73f2393fd3 with lease ID 0x8383db8749104429: Processing first storage report for DS-ed672161-3e84-4f00-871c-69c59f527d43 from datanode DatanodeRegistration(127.0.0.1:34575, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=45039, infoSecurePort=0, ipcPort=44801, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587) 2024-12-08T07:58:30,747 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a98cf73f2393fd3 with lease ID 0x8383db8749104429: from storage DS-ed672161-3e84-4f00-871c-69c59f527d43 node DatanodeRegistration(127.0.0.1:34575, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=45039, infoSecurePort=0, ipcPort=44801, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:30,866 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data3/current/BP-610407669-172.17.0.2-1733644708587/current, will proceed with Du for space computation calculation, 2024-12-08T07:58:30,866 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data4/current/BP-610407669-172.17.0.2-1733644708587/current, will proceed with Du for space computation calculation, 2024-12-08T07:58:30,884 WARN [Thread-1187 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:58:30,886 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48b6dfdcfb80ea3 with lease ID 0x8383db874910442a: Processing first storage report for DS-bb5c09d6-5850-4601-95c1-c11917a34e29 from datanode DatanodeRegistration(127.0.0.1:42133, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=33601, infoSecurePort=0, ipcPort=34019, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587) 2024-12-08T07:58:30,887 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48b6dfdcfb80ea3 with lease ID 0x8383db874910442a: from storage DS-bb5c09d6-5850-4601-95c1-c11917a34e29 node DatanodeRegistration(127.0.0.1:42133, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=33601, infoSecurePort=0, ipcPort=34019, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:30,887 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48b6dfdcfb80ea3 with lease ID 0x8383db874910442a: Processing first storage report for DS-9edc9754-1f83-4f0f-a411-f4b5f5c5cfce from datanode DatanodeRegistration(127.0.0.1:42133, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=33601, infoSecurePort=0, ipcPort=34019, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587) 2024-12-08T07:58:30,887 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48b6dfdcfb80ea3 with lease ID 0x8383db874910442a: from storage DS-9edc9754-1f83-4f0f-a411-f4b5f5c5cfce node DatanodeRegistration(127.0.0.1:42133, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=33601, infoSecurePort=0, ipcPort=34019, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:30,906 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a 2024-12-08T07:58:30,909 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/zookeeper_0, clientPort=51084, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T07:58:30,910 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51084 2024-12-08T07:58:30,910 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:30,912 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:30,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:58:30,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:58:30,921 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42 with version=8 2024-12-08T07:58:30,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging 2024-12-08T07:58:30,922 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T07:58:30,923 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:58:30,924 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35031 2024-12-08T07:58:30,925 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35031 connecting to ZooKeeper ensemble=127.0.0.1:51084 2024-12-08T07:58:31,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350310x0, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:58:31,014 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35031-0x100046efac30000 connected 2024-12-08T07:58:31,106 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:31,108 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:31,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:31,110 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42, hbase.cluster.distributed=false 2024-12-08T07:58:31,111 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:58:31,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35031 2024-12-08T07:58:31,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35031 2024-12-08T07:58:31,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35031 2024-12-08T07:58:31,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35031 2024-12-08T07:58:31,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35031 2024-12-08T07:58:31,129 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T07:58:31,129 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:58:31,130 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40501 2024-12-08T07:58:31,132 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40501 connecting to ZooKeeper ensemble=127.0.0.1:51084 2024-12-08T07:58:31,132 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:31,135 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:31,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405010x0, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:58:31,148 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40501-0x100046efac30001 connected 2024-12-08T07:58:31,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:31,148 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T07:58:31,149 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T07:58:31,150 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T07:58:31,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:58:31,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40501 2024-12-08T07:58:31,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40501 2024-12-08T07:58:31,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40501 2024-12-08T07:58:31,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40501 2024-12-08T07:58:31,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40501 2024-12-08T07:58:31,173 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:35031 2024-12-08T07:58:31,173 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:58:31,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:58:31,186 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T07:58:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,196 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T07:58:31,197 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,35031,1733644710922 from backup master directory 2024-12-08T07:58:31,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:58:31,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:58:31,206 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
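The watcher traffic above (the master registers under /hbase/backup-masters, sets a watch on /hbase/master before that znode exists, then deletes its backup entry once it becomes active) follows the usual ZooKeeper watch-and-claim pattern. The following is a rough, hypothetical sketch using the plain Apache ZooKeeper client rather than HBase's own ZKWatcher/ActiveMasterManager code; the znode paths are taken from the log lines, everything else is illustrative and glosses over the race handling real code needs.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public final class MasterZNodeSketch {
  // Simplified claim sequence; assumes the parent znodes (/hbase, /hbase/backup-masters) exist.
  public static void claimMaster(ZooKeeper zk, String serverName) throws Exception {
    String backup = "/hbase/backup-masters/" + serverName;
    String master = "/hbase/master";
    // Announce ourselves as a backup master (ephemeral: removed automatically if the session dies).
    zk.create(backup, serverName.getBytes("UTF-8"),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // exists() with watch=true also works on znodes that do not exist yet, which is what the
    // "Set watcher on znode that does not yet exist, /hbase/master" lines in the log correspond to.
    if (zk.exists(master, true) == null) {
      // Nobody holds /hbase/master yet: try to claim it. A real implementation must handle
      // NodeExistsException here, since another candidate can win this race.
      zk.create(master, serverName.getBytes("UTF-8"),
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Once active, drop the backup-masters entry, mirroring the ActiveMasterManager(245) record above.
      zk.delete(backup, -1);
    }
  }
}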
2024-12-08T07:58:31,206 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,211 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/hbase.id] with ID: d1b90d0b-5109-41e6-9729-0bce1ce4cd51 2024-12-08T07:58:31,211 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/.tmp/hbase.id 2024-12-08T07:58:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:58:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:58:31,218 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/.tmp/hbase.id]:[hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/hbase.id] 2024-12-08T07:58:31,230 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:31,230 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T07:58:31,231 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
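The cluster-ID sequence just logged (write hbase.id to a temporary .tmp location, then move it to its target path) is the standard create-then-rename pattern on HDFS, which keeps readers from ever observing a half-written file. A minimal, hypothetical sketch with the stock Hadoop FileSystem API is shown below; the path layout mirrors the log, error handling and retries are omitted.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdFileSketch {
  public static void writeClusterId(Configuration conf, Path rootDir, String clusterId)
      throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id"); // temporary location, as in the FSUtils(625) line
    Path dst = new Path(rootDir, "hbase.id");      // final location, as in the FSUtils(620) line
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // Within a single HDFS namespace, rename is atomic, so the id file appears all at once.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to move " + tmp + " to " + dst);
    }
  }
}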
2024-12-08T07:58:31,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:58:31,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:58:31,254 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:58:31,255 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T07:58:31,255 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:58:31,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:58:31,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:58:31,266 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store 2024-12-08T07:58:31,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:58:31,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:58:31,273 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:58:31,273 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:58:31,273 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:58:31,273 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:58:31,273 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:58:31,273 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:58:31,273 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:58:31,273 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644711273Disabling compacts and flushes for region at 1733644711273Disabling writes for close at 1733644711273Writing region close event to WAL at 1733644711273Closed at 1733644711273 2024-12-08T07:58:31,274 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/.initializing 2024-12-08T07:58:31,274 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,278 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C35031%2C1733644710922, suffix=, logDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922, archiveDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/oldWALs, maxLogs=10 2024-12-08T07:58:31,278 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C35031%2C1733644710922.1733644711278 2024-12-08T07:58:31,284 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 2024-12-08T07:58:31,303 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33601:33601),(127.0.0.1/127.0.0.1:45039:45039)] 2024-12-08T07:58:31,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:58:31,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:58:31,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,325 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T07:58:31,329 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:31,330 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T07:58:31,331 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:58:31,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T07:58:31,333 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:58:31,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,335 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T07:58:31,336 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,336 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:58:31,336 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,337 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,338 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,339 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,339 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,340 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T07:58:31,341 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:58:31,344 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:58:31,344 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830842, jitterRate=0.056470707058906555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T07:58:31,345 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644711325Initializing all the Stores at 1733644711327 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644711327Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644711327Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644711327Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644711327Cleaning up temporary data from old regions at 1733644711339 (+12 ms)Region opened successfully at 1733644711345 (+6 ms) 2024-12-08T07:58:31,345 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T07:58:31,349 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65ab028f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:58:31,350 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T07:58:31,350 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T07:58:31,350 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T07:58:31,350 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T07:58:31,351 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T07:58:31,351 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T07:58:31,351 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T07:58:31,354 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T07:58:31,355 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T07:58:31,364 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T07:58:31,365 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T07:58:31,366 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T07:58:31,375 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T07:58:31,375 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T07:58:31,376 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T07:58:31,389 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T07:58:31,390 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T07:58:31,399 DEBUG 
[master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T07:58:31,402 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T07:58:31,410 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T07:58:31,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:31,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:31,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,421 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,35031,1733644710922, sessionid=0x100046efac30000, setting cluster-up flag (Was=false) 2024-12-08T07:58:31,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,473 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T07:58:31,474 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:31,526 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T07:58:31,527 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,35031,1733644710922 2024-12-08T07:58:31,529 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T07:58:31,530 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T07:58:31,531 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T07:58:31,531 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T07:58:31,531 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,35031,1733644710922 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T07:58:31,532 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:58:31,532 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:58:31,532 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:58:31,532 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:58:31,533 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T07:58:31,533 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,533 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:58:31,533 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T07:58:31,534 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644741534 2024-12-08T07:58:31,534 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T07:58:31,534 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T07:58:31,534 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,535 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:58:31,535 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T07:58:31,535 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T07:58:31,536 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T07:58:31,536 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T07:58:31,536 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644711536,5,FailOnTimeoutGroup] 2024-12-08T07:58:31,536 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644711536,5,FailOnTimeoutGroup] 2024-12-08T07:58:31,536 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,536 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T07:58:31,536 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,536 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,537 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,537 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T07:58:31,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:58:31,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:58:31,546 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T07:58:31,546 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42 2024-12-08T07:58:31,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:58:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:58:31,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:58:31,558 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(746): ClusterId : d1b90d0b-5109-41e6-9729-0bce1ce4cd51 2024-12-08T07:58:31,558 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T07:58:31,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:58:31,561 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:58:31,561 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:31,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:58:31,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:58:31,563 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:31,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:58:31,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:58:31,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:31,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:58:31,568 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:58:31,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:31,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:31,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:58:31,569 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T07:58:31,569 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T07:58:31,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740 2024-12-08T07:58:31,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740 2024-12-08T07:58:31,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:58:31,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:58:31,571 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
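The two FlushLargeStoresPolicy entries above (for master:store and hbase:meta) describe a fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the lower bound becomes the region's memstore flush size divided by the number of column families. A small sketch of that arithmetic follows; the helper name is made up, the master:store numbers come from the log, and the 64 MB flush size assumed for hbase:meta is only inferred from the logged 16 MB result.

```java
public class FlushLowerBoundSketch {
  // Fallback from the log: lower bound = memstore flush size / number of column families.
  static long lowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store above: flushSize=134217728 (128 MB), 4 families -> 33554432 (32 MB)
    System.out.println(lowerBound(134_217_728L, 4)); // 33554432
    // hbase:meta above: 4 families, logged lower bound 16777216 (16 MB);
    // a 64 MB flush size is assumed here to reproduce that result.
    System.out.println(lowerBound(67_108_864L, 4));  // 16777216
  }
}
```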
2024-12-08T07:58:31,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:58:31,574 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:58:31,575 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718610, jitterRate=-0.08624096214771271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:58:31,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644711557Initializing all the Stores at 1733644711559 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644711559Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644711559Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644711559Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644711559Cleaning up temporary data from old regions at 1733644711571 (+12 ms)Region opened successfully at 1733644711576 (+5 ms) 2024-12-08T07:58:31,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:58:31,576 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:58:31,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:58:31,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:58:31,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:58:31,577 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:58:31,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644711576Disabling compacts and flushes for region at 1733644711576Disabling writes for close at 1733644711576Writing region 
close event to WAL at 1733644711577 (+1 ms)Closed at 1733644711577 2024-12-08T07:58:31,578 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:58:31,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T07:58:31,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T07:58:31,579 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T07:58:31,580 DEBUG [RS:0;0106a245d0e8:40501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36d38375, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:58:31,580 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:58:31,581 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T07:58:31,592 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:40501 2024-12-08T07:58:31,593 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T07:58:31,593 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T07:58:31,593 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T07:58:31,593 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,35031,1733644710922 with port=40501, startcode=1733644711128 2024-12-08T07:58:31,594 DEBUG [RS:0;0106a245d0e8:40501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T07:58:31,596 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47729, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T07:58:31,596 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35031 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,597 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35031 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,598 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42 2024-12-08T07:58:31,598 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45485 2024-12-08T07:58:31,598 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T07:58:31,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:58:31,611 DEBUG [RS:0;0106a245d0e8:40501 {}] zookeeper.ZKUtil(111): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,611 WARN [RS:0;0106a245d0e8:40501 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T07:58:31,611 INFO [RS:0;0106a245d0e8:40501 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:58:31,611 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,611 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,40501,1733644711128] 2024-12-08T07:58:31,614 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T07:58:31,616 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T07:58:31,617 INFO [RS:0;0106a245d0e8:40501 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T07:58:31,617 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
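The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. The sketch below only relates those two logged numbers; the 0.95 fraction is the ratio they imply (836 / 880), stated here as an assumption rather than a value read from this configuration.

```java
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long globalLimitMb = 880;         // globalMemStoreLimit from the log line above
    double lowerLimitFraction = 0.95; // assumption: the ratio implied by 836 M / 880 M
    long lowMarkMb = Math.round(globalLimitMb * lowerLimitFraction);
    System.out.println(lowMarkMb);    // 836, matching globalMemStoreLimitLowMark in the log
  }
}
```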
2024-12-08T07:58:31,617 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T07:58:31,618 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T07:58:31,618 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,618 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,618 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,618 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,618 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,618 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,618 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:58:31,619 DEBUG [RS:0;0106a245d0e8:40501 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:58:31,620 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
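The ChoreService entries above and below register periodic chores such as CompactionChecker with period=1000 ms. The sketch below shows the same fixed-rate scheduling idea using a plain ScheduledThreadPoolExecutor; it is not HBase's ChoreService, and the task body is a placeholder.

```java
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledThreadPoolExecutor pool = new ScheduledThreadPoolExecutor(1);
    // period=1000, unit=MILLISECONDS, as in the CompactionChecker chore line above
    pool.scheduleAtFixedRate(() -> System.out.println("compaction check"),
        0, 1000, TimeUnit.MILLISECONDS);
    Thread.sleep(3_000);  // let a few runs happen; a real chore runs until server shutdown
    pool.shutdown();
  }
}
```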
2024-12-08T07:58:31,620 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,620 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,620 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,620 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,620 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40501,1733644711128-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:58:31,640 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T07:58:31,640 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40501,1733644711128-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,640 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,640 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.Replication(171): 0106a245d0e8,40501,1733644711128 started 2024-12-08T07:58:31,657 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:31,657 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,40501,1733644711128, RpcServer on 0106a245d0e8/172.17.0.2:40501, sessionid=0x100046efac30001 2024-12-08T07:58:31,657 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T07:58:31,657 DEBUG [RS:0;0106a245d0e8:40501 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,657 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,40501,1733644711128' 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,40501,1733644711128' 2024-12-08T07:58:31,658 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T07:58:31,659 DEBUG 
[RS:0;0106a245d0e8:40501 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T07:58:31,659 DEBUG [RS:0;0106a245d0e8:40501 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T07:58:31,659 INFO [RS:0;0106a245d0e8:40501 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T07:58:31,659 INFO [RS:0;0106a245d0e8:40501 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T07:58:31,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T07:58:31,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T07:58:31,732 WARN [0106a245d0e8:35031 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
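Note on the two WARN stack traces above: RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed reflectively while recovering a WAL lease, so when the underlying DFSClient has already been closed the IOException ("Filesystem closed") surfaces as the cause of an InvocationTargetException. A minimal sketch of that call pattern against a generic FileSystem, purely to illustrate the shape of the failure; the path and the closed-filesystem setup here are assumptions, not the test's own code:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path wal = new Path("/tmp/example-wal"); // hypothetical path

            fs.close(); // simulate the "Filesystem closed" state seen in the log

            try {
                // isFileClosed(Path) is not on the FileSystem base class, which is
                // why HBase reaches for it via reflection.
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                Boolean closed = (Boolean) isFileClosed.invoke(fs, wal);
                System.out.println("closed=" + closed);
            } catch (NoSuchMethodException e) {
                System.out.println("filesystem " + fs.getClass() + " has no isFileClosed");
            } catch (InvocationTargetException e) {
                // The real IOException is the cause, exactly as in the log's "Caused by" section.
                System.out.println("invocation failed: " + e.getCause());
            }
        }
    }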
2024-12-08T07:58:31,761 INFO [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C40501%2C1733644711128, suffix=, logDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128, archiveDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs, maxLogs=32 2024-12-08T07:58:31,762 INFO [RS:0;0106a245d0e8:40501 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:31,768 INFO [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:31,769 DEBUG [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45039:45039),(127.0.0.1/127.0.0.1:33601:33601)] 2024-12-08T07:58:31,982 DEBUG [0106a245d0e8:35031 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T07:58:31,983 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,40501,1733644711128 2024-12-08T07:58:31,984 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,40501,1733644711128, state=OPENING 2024-12-08T07:58:32,090 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T07:58:32,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:32,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:32,102 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:58:32,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:58:32,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:58:32,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,40501,1733644711128}] 2024-12-08T07:58:32,257 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T07:58:32,259 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44423, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T07:58:32,265 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T07:58:32,265 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:58:32,267 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C40501%2C1733644711128.meta, suffix=.meta, logDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128, archiveDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs, maxLogs=32 2024-12-08T07:58:32,267 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta 2024-12-08T07:58:32,274 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta 2024-12-08T07:58:32,275 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33601:33601),(127.0.0.1/127.0.0.1:45039:45039)] 2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T07:58:32,276 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
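Note on the "WAL configuration" entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32): the roll size is normally the block size scaled by the log-roll multiplier (stock default 0.5, so 256 MB x 0.5 = 128 MB). A minimal sketch of setting and deriving those values on a Hadoop Configuration; the property names are stock HBase keys assumed here, not read out of this test:

    import org.apache.hadoop.conf.Configuration;

    public class WalRollSizeSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Assumed stock HBase keys, mirroring the values reported in the log.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            conf.setInt("hbase.regionserver.maxlogs", 32);

            long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0);
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            long rollsize = (long) (blocksize * multiplier);

            // Prints: blocksize=256 MB, rollsize=128 MB, maxLogs=32
            System.out.printf("blocksize=%d MB, rollsize=%d MB, maxLogs=%d%n",
                blocksize >> 20, rollsize >> 20, conf.getInt("hbase.regionserver.maxlogs", 32));
        }
    }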
2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T07:58:32,276 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T07:58:32,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:58:32,278 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:58:32,279 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:32,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:32,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:58:32,280 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:58:32,280 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:32,280 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:32,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:58:32,281 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:58:32,282 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:32,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:58:32,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:58:32,283 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:58:32,283 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:32,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
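Note on the HStore lines above: each store is opened with the column-family settings of hbase:meta (DATA_BLOCK_ENCODING=ROW_INDEX_V1, 8 KB blocks, in-memory, ROWCOL blooms, 3 versions, as the open journal below spells out). A minimal sketch of declaring a family with that shape through the public ColumnFamilyDescriptorBuilder API; the family name is illustrative, and this is not how hbase:meta itself is defined in the test:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
        public static void main(String[] args) {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))          // illustrative family name
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)                      // 8192 B, as reported for the meta families
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setMaxVersions(3)
                .build();
            System.out.println(info);
        }
    }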
2024-12-08T07:58:32,284 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:58:32,284 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740 2024-12-08T07:58:32,286 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740 2024-12-08T07:58:32,287 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:58:32,287 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:58:32,287 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T07:58:32,289 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:58:32,290 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697316, jitterRate=-0.11331693828105927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:58:32,290 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T07:58:32,291 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644712277Writing region info on filesystem at 1733644712277Initializing all the Stores at 1733644712277Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644712277Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644712278 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644712278Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644712278Cleaning up temporary data from old regions at 1733644712287 (+9 ms)Running coprocessor post-open hooks at 1733644712290 (+3 ms)Region opened successfully at 1733644712291 (+1 ms) 2024-12-08T07:58:32,292 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644712257 2024-12-08T07:58:32,294 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T07:58:32,294 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T07:58:32,295 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,40501,1733644711128 2024-12-08T07:58:32,297 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,40501,1733644711128, state=OPEN 2024-12-08T07:58:32,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:58:32,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:58:32,331 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,40501,1733644711128 2024-12-08T07:58:32,331 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:58:32,331 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:58:32,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T07:58:32,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,40501,1733644711128 in 229 msec 2024-12-08T07:58:32,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T07:58:32,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 757 msec 2024-12-08T07:58:32,339 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:58:32,339 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T07:58:32,340 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:58:32,340 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,40501,1733644711128, seqNum=-1] 2024-12-08T07:58:32,341 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:58:32,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49581, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:58:32,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 818 msec 2024-12-08T07:58:32,350 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644712350, completionTime=-1 2024-12-08T07:58:32,350 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T07:58:32,350 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T07:58:32,352 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T07:58:32,352 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644772352 2024-12-08T07:58:32,352 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733644832352 2024-12-08T07:58:32,352 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T07:58:32,353 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,35031,1733644710922-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:32,353 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,35031,1733644710922-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:32,353 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,35031,1733644710922-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:32,353 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:35031, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T07:58:32,353 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:32,353 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T07:58:32,355 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T07:58:32,357 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.151sec 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,35031,1733644710922-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:58:32,358 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,35031,1733644710922-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T07:58:32,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b81225, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:58:32,359 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,35031,-1 for getting cluster id 2024-12-08T07:58:32,359 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T07:58:32,360 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd1b90d0b-5109-41e6-9729-0bce1ce4cd51' 2024-12-08T07:58:32,361 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T07:58:32,361 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T07:58:32,361 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,35031,1733644710922-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
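Note on the client-side entries above and below (ClusterIdFetcher, ConnectionRegistryService, fetching the meta region location): they show the test building an ordinary client connection against the freshly started minicluster. A minimal sketch of the equivalent public client API; the ZooKeeper quorum value is copied from the ZKWatcher lines in this log and everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ClientConnectSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum taken from the ZKWatcher lines in this log.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1:51084");

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Resolves hbase:meta's location, the same lookup the
                // "fetched meta region location" DEBUG lines report.
                System.out.println(locator.getRegionLocation(new byte[0]));
            }
        }
    }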
2024-12-08T07:58:32,361 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T07:58:32,361 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d1b90d0b-5109-41e6-9729-0bce1ce4cd51" 2024-12-08T07:58:32,361 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec68ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:58:32,361 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,35031,-1] 2024-12-08T07:58:32,361 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T07:58:32,365 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:32,366 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T07:58:32,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5282eca5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:58:32,367 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:58:32,369 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,40501,1733644711128, seqNum=-1] 2024-12-08T07:58:32,369 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:58:32,371 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:58:32,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0106a245d0e8,35031,1733644710922 2024-12-08T07:58:32,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:58:32,375 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T07:58:32,375 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-08T07:58:32,375 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-08T07:58:32,376 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T07:58:32,377 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 0106a245d0e8,35031,1733644710922 2024-12-08T07:58:32,377 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub 
is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1b84819d 2024-12-08T07:58:32,377 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T07:58:32,379 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T07:58:32,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T07:58:32,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-08T07:58:32,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:58:32,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T07:58:32,382 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T07:58:32,382 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:32,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-08T07:58:32,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:58:32,384 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T07:58:32,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741835_1011 (size=395) 2024-12-08T07:58:32,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741835_1011 (size=395) 2024-12-08T07:58:32,393 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3da59edd4c184fada39db32f0a7e9e0c, NAME => 
'TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42 2024-12-08T07:58:32,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741836_1012 (size=78) 2024-12-08T07:58:32,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42133 is added to blk_1073741836_1012 (size=78) 2024-12-08T07:58:32,400 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:58:32,400 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 3da59edd4c184fada39db32f0a7e9e0c, disabling compactions & flushes 2024-12-08T07:58:32,400 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:32,400 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:32,400 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. after waiting 0 ms 2024-12-08T07:58:32,400 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:32,400 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 
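Note on the CreateTableProcedure entries above: they correspond to a client-side create of 'TestLogRolling-testLogRollOnPipelineRestart' with a single 'info' family, and the earlier TableDescriptorChecker warnings indicate a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) so that rolling and flushing happen quickly during the test. A minimal sketch of building such a descriptor with the public client API; the Admin handle and the values are drawn from the log, not from the test's own source:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
        // Builds a descriptor shaped like the one the master logged above.
        static TableDescriptor descriptor() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)        // triggers the MAX_FILESIZE warning seen in the log
                .setMemStoreFlushSize(8192L)    // triggers the MEMSTORE_FLUSHSIZE warning seen in the log
                .build();
        }

        // 'admin' would come from Connection.getAdmin() on a live cluster.
        static void create(Admin admin) throws java.io.IOException {
            admin.createTable(descriptor());
        }
    }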
2024-12-08T07:58:32,400 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3da59edd4c184fada39db32f0a7e9e0c: Waiting for close lock at 1733644712400Disabling compacts and flushes for region at 1733644712400Disabling writes for close at 1733644712400Writing region close event to WAL at 1733644712400Closed at 1733644712400 2024-12-08T07:58:32,402 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T07:58:32,402 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733644712402"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644712402"}]},"ts":"1733644712402"} 2024-12-08T07:58:32,404 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T07:58:32,406 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T07:58:32,406 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644712406"}]},"ts":"1733644712406"} 2024-12-08T07:58:32,409 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-08T07:58:32,409 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3da59edd4c184fada39db32f0a7e9e0c, ASSIGN}] 2024-12-08T07:58:32,410 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3da59edd4c184fada39db32f0a7e9e0c, ASSIGN 2024-12-08T07:58:32,411 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3da59edd4c184fada39db32f0a7e9e0c, ASSIGN; state=OFFLINE, location=0106a245d0e8,40501,1733644711128; forceNewPlan=false, retain=false 2024-12-08T07:58:32,562 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3da59edd4c184fada39db32f0a7e9e0c, regionState=OPENING, regionLocation=0106a245d0e8,40501,1733644711128 2024-12-08T07:58:32,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3da59edd4c184fada39db32f0a7e9e0c, ASSIGN because future has completed 2024-12-08T07:58:32,566 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3da59edd4c184fada39db32f0a7e9e0c, server=0106a245d0e8,40501,1733644711128}]
2024-12-08T07:58:32,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T07:58:32,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T07:58:32,724 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c.
2024-12-08T07:58:32,724 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3da59edd4c184fada39db32f0a7e9e0c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:58:32,724 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,724 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:58:32,724 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,725 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,726 INFO [StoreOpener-3da59edd4c184fada39db32f0a7e9e0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,727 INFO [StoreOpener-3da59edd4c184fada39db32f0a7e9e0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3da59edd4c184fada39db32f0a7e9e0c columnFamilyName info 2024-12-08T07:58:32,727 DEBUG [StoreOpener-3da59edd4c184fada39db32f0a7e9e0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:58:32,728 INFO [StoreOpener-3da59edd4c184fada39db32f0a7e9e0c-1 {}] regionserver.HStore(327): Store=3da59edd4c184fada39db32f0a7e9e0c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:58:32,728 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,729 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,729 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,730 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,730 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,732 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,734 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:58:32,735 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3da59edd4c184fada39db32f0a7e9e0c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852074, jitterRate=0.08346901834011078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T07:58:32,735 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:32,736 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3da59edd4c184fada39db32f0a7e9e0c: Running coprocessor pre-open hook at 1733644712725Writing region info on filesystem at 1733644712725Initializing all the Stores at 1733644712726 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644712726Cleaning up temporary data from old regions at 1733644712730 (+4 ms)Running coprocessor post-open hooks at 1733644712735 (+5 ms)Region opened successfully at 1733644712736 (+1 ms) 2024-12-08T07:58:32,737 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c., pid=6, masterSystemTime=1733644712720 2024-12-08T07:58:32,740 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:32,740 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:32,741 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3da59edd4c184fada39db32f0a7e9e0c, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,40501,1733644711128 2024-12-08T07:58:32,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3da59edd4c184fada39db32f0a7e9e0c, server=0106a245d0e8,40501,1733644711128 because future has completed 2024-12-08T07:58:32,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T07:58:32,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3da59edd4c184fada39db32f0a7e9e0c, server=0106a245d0e8,40501,1733644711128 in 178 msec 2024-12-08T07:58:32,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T07:58:32,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3da59edd4c184fada39db32f0a7e9e0c, ASSIGN in 338 msec 2024-12-08T07:58:32,751 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T07:58:32,751 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644712751"}]},"ts":"1733644712751"} 2024-12-08T07:58:32,754 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-08T07:58:32,755 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T07:58:32,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 376 msec 2024-12-08T07:58:33,672 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T07:58:33,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:33,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:58:33,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:34,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:34,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:35,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:35,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:36,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:36,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:37,615 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T07:58:37,615 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-08T07:58:37,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:37,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:38,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T07:58:38,168 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T07:58:38,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T07:58:38,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-08T07:58:38,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:58:38,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T07:58:38,170 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T07:58:38,170 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T07:58:38,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:38,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:39,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:39,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:40,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:40,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:41,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:41,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:58:42,461 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-08T07:58:42,461 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-08T07:58:42,463 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T07:58:42,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:42,467 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c., hostname=0106a245d0e8,40501,1733644711128, seqNum=2] 2024-12-08T07:58:42,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:42,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:43,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:43,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:44,471 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:44,471 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:44,471 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:44,471 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:44,472 WARN [DataStreamer for file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta block BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK], DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]) is bad. 
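The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above are RecoverLeaseFSUtils reflectively asking the NameNode whether the old WAL file has been closed; because the DFS client that owned that WAL was already shut down, every probe throws. The same check, written directly against the public DistributedFileSystem.isFileClosed(Path) API instead of HBase's reflective helper, looks roughly like the sketch below (the class name, the probe method, and the path argument are illustrative, not taken from the test):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalCloseProbe {
      // Ask the NameNode whether the file's last block is closed; false means
      // lease recovery is still in progress and the caller should keep waiting.
      static boolean isClosed(FileSystem fs, Path wal) throws IOException {
        if (fs instanceof DistributedFileSystem) {
          return ((DistributedFileSystem) fs).isFileClosed(wal);
        }
        return true; // non-HDFS filesystems: nothing to recover
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]); // an hdfs://.../WALs/... path
        FileSystem fs = wal.getFileSystem(conf);
        System.out.println("closed=" + isClosed(fs, wal));
        fs.close();
        // Any probe made after fs.close() fails with
        // java.io.IOException: Filesystem closed, the exception wrapped in the
        // InvocationTargetException warnings above.
      }
    }

RecoverLeaseFSUtils goes through reflection, presumably because isFileClosed is not part of the generic FileSystem contract; the failure mode is the same either way once the client is closed.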
2024-12-08T07:58:44,472 WARN [DataStreamer for file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 block BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK], DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]) is bad. 2024-12-08T07:58:44,472 WARN [DataStreamer for file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 block BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK], DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42133,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]) is bad. 2024-12-08T07:58:44,472 WARN [PacketResponder: BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42133] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
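The "Error Recovery ... datanode X is bad" warnings above show the DFS output streams dropping the failed datanode from each write pipeline and carrying on with the remaining replica. How aggressively the client tries to repair the pipeline is governed by the standard replace-datanode-on-failure client properties; the sketch below shows those knobs (the values are only an example, not what this test configures):

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConf {
      public static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Allow the client to ask for a replacement datanode when one in the
        // pipeline is marked bad (the log above marks 127.0.0.1:42133 bad).
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces when the pipeline is large enough; with the
        // two-datanode pipeline in the log above there is nothing to swap in.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Keep writing with fewer replicas instead of failing the stream
        // outright if no replacement can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }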
2024-12-08T07:58:44,473 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-300342028_22 at /127.0.0.1:46794 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42133:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46794 dst: /127.0.0.1:42133 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:44,473 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:46814 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42133:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46814 dst: /127.0.0.1:42133 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:44,473 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:46474 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46474 dst: /127.0.0.1:34575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:58:44,474 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-300342028_22 at /127.0.0.1:46432 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46432 dst: /127.0.0.1:34575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:44,474 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:46490 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46490 dst: /127.0.0.1:34575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:58:44,474 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:46812 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42133:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46812 dst: /127.0.0.1:42133 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
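The ClosedChannelException and "Premature EOF from inputStream" DataXceiver errors above are the datanode-side view of the pipeline being torn down: the sockets feeding the in-flight WAL blocks are closed while the datanodes are bounced, and the shutdown and restart of each datanode follows in the log below. A hedged sketch of that bounce, using the MiniDFSCluster API that HBase's test utility wraps (the cluster variable, index, and helper name are assumptions; this is the general pattern, not the test's exact code):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DataNodeBounce {
      // Stop a datanode, keep its storage directories, then bring it back up
      // so the NameNode sees the same DatanodeUuid re-register (as in the
      // "processReport ... from storage DS-..." lines later in this log).
      static void bounce(MiniDFSCluster cluster, int index) throws IOException {
        MiniDFSCluster.DataNodeProperties props = cluster.stopDataNode(index);
        boolean restarted = cluster.restartDataNode(props);
        if (!restarted) {
          throw new IOException("datanode " + index + " did not restart");
        }
        cluster.waitActive(); // wait until the datanode has re-registered
      }
    }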
2024-12-08T07:58:44,563 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27a17641{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:44,564 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62cd82cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:44,564 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:44,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b91ed3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:44,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1118a265{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:44,566 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:44,566 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T07:58:44,566 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-610407669-172.17.0.2-1733644708587 (Datanode Uuid cc6fc935-c807-410e-9199-7ae51bb9da38) service to localhost/127.0.0.1:45485 2024-12-08T07:58:44,566 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:44,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data3/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:44,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data4/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:44,567 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:44,575 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:44,579 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:44,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:44,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:44,580 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T07:58:44,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46332285{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:44,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e793ffb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:44,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45c55ac5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-39869-hadoop-hdfs-3_4_1-tests_jar-_-any-13952894022513682664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:44,681 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10e70018{HTTP/1.1, (http/1.1)}{localhost:39869} 2024-12-08T07:58:44,681 INFO [Time-limited test {}] server.Server(415): Started @172249ms 2024-12-08T07:58:44,682 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:58:44,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:44,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:58:44,713 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:44,713 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:44,713 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:44,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:55380 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55380 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:44,714 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-300342028_22 at /127.0.0.1:55384 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55384 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:44,714 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:55382 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55382 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
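Once both datanodes are back ("Data Nodes restarted" further down), the test confirms that data written before the pipeline restart is still readable ("Validated row row1002"). A minimal client-side version of that check is sketched below; the table name and row key are taken from the log, while the surrounding class and connection setup are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ValidateRowAfterRestart {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name)) {
          Result result = table.get(new Get(Bytes.toBytes("row1002")));
          if (result.isEmpty()) {
            throw new IOException("row1002 was lost across the pipeline restart");
          }
        }
      }
    }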
2024-12-08T07:58:44,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a16b3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:44,716 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27ffc774{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:44,716 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:44,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d6935b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:44,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34c1099f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:44,718 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:44,718 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T07:58:44,718 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-610407669-172.17.0.2-1733644708587 (Datanode Uuid 86420e16-6872-4605-8a97-c126c7de6b47) service to localhost/127.0.0.1:45485 2024-12-08T07:58:44,718 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:44,718 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data1/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:44,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data2/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:44,719 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:44,742 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:44,745 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:44,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:44,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:44,749 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:44,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29d5ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:44,750 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7939cb3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:44,850 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1dad3af2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-44469-hadoop-hdfs-3_4_1-tests_jar-_-any-16199288524057297021/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:44,851 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4307cd3{HTTP/1.1, (http/1.1)}{localhost:44469} 2024-12-08T07:58:44,851 INFO [Time-limited test {}] server.Server(415): Started @172419ms 2024-12-08T07:58:44,852 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:58:45,338 WARN [Thread-1335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:58:45,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd05393145b41f63d with lease ID 0x8383db874910442b: from storage DS-bb5c09d6-5850-4601-95c1-c11917a34e29 node DatanodeRegistration(127.0.0.1:36427, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=43691, infoSecurePort=0, ipcPort=45209, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T07:58:45,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd05393145b41f63d with lease ID 0x8383db874910442b: from storage DS-9edc9754-1f83-4f0f-a411-f4b5f5c5cfce node DatanodeRegistration(127.0.0.1:36427, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=43691, infoSecurePort=0, ipcPort=45209, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:45,432 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T07:58:45,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9c3bbd75d3f4232 with lease ID 0x8383db874910442c: from storage DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49 node DatanodeRegistration(127.0.0.1:44553, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=34691, infoSecurePort=0, ipcPort=44823, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:45,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9c3bbd75d3f4232 with lease ID 0x8383db874910442c: from storage DS-ed672161-3e84-4f00-871c-69c59f527d43 node DatanodeRegistration(127.0.0.1:44553, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=34691, infoSecurePort=0, ipcPort=44823, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:45,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:45,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:58:45,882 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-08T07:58:45,884 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-08T07:58:45,891 ERROR [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:45,891 WARN [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
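The "All datanodes ... are bad. Aborting..." append failure above is what pushes the log roller into the "roll requested" path that follows: the region server abandons the writer whose pipeline died and opens a new WAL file on the restarted datanodes. The same roll can also be requested administratively; a sketch using the public Admin API (the server name string is copied from the WAL directory in the log, the rest is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalOnServer {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // host,port,startcode as it appears in the WAL directory name above
        ServerName server = ServerName.valueOf("0106a245d0e8,40501,1733644711128");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(server); // ask the region server to roll its WAL now
        }
      }
    }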
2024-12-08T07:58:45,891 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C40501%2C1733644711128:(num 1733644711762) roll requested 2024-12-08T07:58:45,891 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:45,896 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 newFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:45,897 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:45,897 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:45,897 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:45,897 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:45,897 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:45,897 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:45,897 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:45,898 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
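Closing the old writer fails for the same "all datanodes are bad" reason, so the close path falls back to lease recovery on the old WAL file, which is what the "Recover lease on dfs file ..." and "Failed to recover lease, attempt=0 ... Lease recovery is in progress" lines below record. Stripped of HBase's reflection and timeout bookkeeping, the loop RecoverLeaseFSUtils runs boils down to something like this sketch (the pause value is a placeholder, not the utility's actual schedule):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryLoop {
      // Keep nudging the NameNode until the previous writer's lease is released
      // and the file's last block is finalized.
      static void recover(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        // recoverLease returns true once the file is already closed.
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered) {
          Thread.sleep(1000); // placeholder pause between probes
          // isFileClosed is the cheap follow-up probe; fall back to another
          // recoverLease call if the block is still under recovery.
          recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
        }
      }
    }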
2024-12-08T07:58:45,898 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:45,898 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34691:34691),(127.0.0.1/127.0.0.1:43691:43691)] 2024-12-08T07:58:45,898 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 is not closed yet, will try archiving it next time 2024-12-08T07:58:45,898 WARN [IPC Server handler 2 on default port 45485 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-08T07:58:45,898 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 after 0ms 2024-12-08T07:58:46,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:46,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:47,340 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-08T07:58:47,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:47,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:47,902 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-08T07:58:48,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:48,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:49,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:49,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:49,899 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 after 4001ms 2024-12-08T07:58:49,905 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:49,906 WARN [DataStreamer for file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 block BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44553,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK], DatanodeInfoWithStorage[127.0.0.1:36427,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44553,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]) is bad. 2024-12-08T07:58:49,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:43908 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36427:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43908 dst: /127.0.0.1:36427 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:49,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:36092 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36092 dst: /127.0.0.1:44553 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
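Two distinct threads are interleaved in the entries above. For the current cluster (port 45485), lease recovery is started on the just-rolled WAL file; attempt=0 fails immediately because the NameNode reports recovery still in progress (RecoveryId = 1017 for blk_1073741833_1014), and it only succeeds on attempt=1 about four seconds later. Separately, a Close-WAL-Writer thread left over from an earlier, already shut-down cluster (port 37725) keeps retrying against a closed DFSClient, which is why its isFileClosed probes fail with InvocationTargetException caused by "Filesystem closed". The ResponseProcessor and DataXceiver errors for blk_1073741837_1016 mark the new WAL's pipeline breaking as the DataNodes are bounced a second time. A simplified sketch of the recover-then-poll pattern that RecoverLeaseFSUtils implements, using only the public HDFS calls named in the stack traces (recoverLease and isFileClosed); the timeouts and reflection HBase uses are omitted:

    // Simplified sketch of lease recovery on a WAL file: ask the NameNode to
    // recover the lease, then poll isFileClosed until block recovery finishes
    // (compare "Recovered lease, attempt=1 ... after 4001ms" in this log).
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
          long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        // true means the file is already closed and nothing more is needed.
        if (dfs.recoverLease(wal)) {
          return true;
        }
        while (System.currentTimeMillis() < deadline) {
          Thread.sleep(1000); // recovery is asynchronous on the NameNode side
          if (dfs.isFileClosed(wal)) {
            return true;
          }
        }
        return false;
      }
    }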
2024-12-08T07:58:49,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1dad3af2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:49,953 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4307cd3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:49,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:49,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7939cb3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:49,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29d5ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:49,957 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T07:58:49,957 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:49,957 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:49,957 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-610407669-172.17.0.2-1733644708587 (Datanode Uuid 86420e16-6872-4605-8a97-c126c7de6b47) service to localhost/127.0.0.1:45485 2024-12-08T07:58:49,958 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data1/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:49,958 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data2/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:49,959 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:49,977 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:49,982 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:49,985 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:49,985 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:49,985 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:49,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58c81822{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:49,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a15b38f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:50,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@647c40ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-35301-hadoop-hdfs-3_4_1-tests_jar-_-any-9100170144688642556/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:50,120 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38ca15c7{HTTP/1.1, (http/1.1)}{localhost:35301} 2024-12-08T07:58:50,120 INFO [Time-limited test {}] server.Server(415): Started @177688ms 2024-12-08T07:58:50,122 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:58:50,165 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:50,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1966724327_22 at /127.0.0.1:54174 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36427:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54174 dst: /127.0.0.1:36427 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:50,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45c55ac5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:50,178 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10e70018{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:58:50,178 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:58:50,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e793ffb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:58:50,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46332285{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:58:50,180 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:58:50,180 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:58:50,180 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:58:50,180 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-610407669-172.17.0.2-1733644708587 (Datanode Uuid cc6fc935-c807-410e-9199-7ae51bb9da38) service to localhost/127.0.0.1:45485 2024-12-08T07:58:50,181 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data3/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:50,181 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data4/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:58:50,181 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:58:50,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:58:50,204 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:58:50,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:58:50,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:58:50,209 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:58:50,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7202b0aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:58:50,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66269315{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:58:50,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e666703{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/java.io.tmpdir/jetty-localhost-40367-hadoop-hdfs-3_4_1-tests_jar-_-any-1921753785056040687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:58:50,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2dd41fe4{HTTP/1.1, 
(http/1.1)}{localhost:40367} 2024-12-08T07:58:50,316 INFO [Time-limited test {}] server.Server(415): Started @177884ms 2024-12-08T07:58:50,318 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:58:50,609 WARN [Thread-1409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T07:58:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2668dd739b32d3bb with lease ID 0x8383db874910442d: from storage DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49 node DatanodeRegistration(127.0.0.1:37851, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=46359, infoSecurePort=0, ipcPort=43047, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2668dd739b32d3bb with lease ID 0x8383db874910442d: from storage DS-ed672161-3e84-4f00-871c-69c59f527d43 node DatanodeRegistration(127.0.0.1:37851, datanodeUuid=86420e16-6872-4605-8a97-c126c7de6b47, infoPort=46359, infoSecurePort=0, ipcPort=43047, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:50,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:50,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:50,843 WARN [Thread-1429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T07:58:50,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf30c9abf332e6577 with lease ID 0x8383db874910442e: from storage DS-bb5c09d6-5850-4601-95c1-c11917a34e29 node DatanodeRegistration(127.0.0.1:38357, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=34909, infoSecurePort=0, ipcPort=42119, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:50,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf30c9abf332e6577 with lease ID 0x8383db874910442e: from storage DS-9edc9754-1f83-4f0f-a411-f4b5f5c5cfce node DatanodeRegistration(127.0.0.1:38357, datanodeUuid=cc6fc935-c807-410e-9199-7ae51bb9da38, infoPort=34909, infoSecurePort=0, ipcPort=42119, storageInfo=lv=-57;cid=testClusterID;nsid=1415552170;c=1733644708587), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:58:51,405 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-08T07:58:51,407 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-08T07:58:51,409 ERROR [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36427,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:51,409 WARN [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36427,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
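This is the same pattern one round later: the second DataNode bounce completes, both restarted nodes send block reports that the NameNode processes, the test logs "Data Nodes restarted" and validates row1004, and the still-open WAL pipeline (now pointing at 127.0.0.1:36427) again fails with "All datanodes ... are bad", triggering the roll that follows. The read-back behind "Validated row rowNNNN" is not shown in the log; a hypothetical sketch of such a check, with the table name invented for illustration (only the row key appears in the log):

    // Hypothetical read-back check corresponding to "Validated row row1004".
    // The table name is invented for illustration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class RowValidationSketch {
      public static void validateRow(Configuration conf, String row) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testLogRolling"))) {
          Result result = table.get(new Get(Bytes.toBytes(row)));
          if (result.isEmpty()) {
            throw new AssertionError("Row " + row + " not found after WAL roll");
          }
        }
      }
    }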
2024-12-08T07:58:51,409 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C40501%2C1733644711128:(num 1733644725891) roll requested 2024-12-08T07:58:51,410 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.1733644731409 2024-12-08T07:58:51,415 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 newFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 2024-12-08T07:58:51,415 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:51,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:51,416 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:51,416 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:51,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:51,416 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 2024-12-08T07:58:51,416 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36427,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:51,416 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36427,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
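The roll above is again initiated automatically by the WAL roller in response to the append failure, not by a client call. For reference only, a roll can also be requested explicitly from client code, for example via the Admin API; this is a sketch under that assumption, not what this log shows:

    // Sketch only: explicitly asking one region server to roll its WAL via the
    // Admin API. In this log the roll is driven by the AbstractWALRoller itself.
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class RollWalSketch {
      public static void rollWal(Connection conn, ServerName serverName) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(serverName); // region server rolls its WAL writer
        }
      }
    }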
2024-12-08T07:58:51,417 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:51,417 WARN [IPC Server handler 0 on default port 45485 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-08T07:58:51,417 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 after 0ms 2024-12-08T07:58:51,421 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46359:46359),(127.0.0.1/127.0.0.1:34909:34909)] 2024-12-08T07:58:51,421 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 is not closed yet, will try archiving it next time 2024-12-08T07:58:51,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:51,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:52,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:52,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:53,423 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:53,434 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 newFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:53,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:53,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:53,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:53,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:53,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:53,436 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:53,437 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46359:46359),(127.0.0.1/127.0.0.1:34909:34909)] 2024-12-08T07:58:53,437 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 is not closed yet, will try archiving it next time 2024-12-08T07:58:53,437 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 is not closed yet, will try archiving it next time 2024-12-08T07:58:53,437 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:53,437 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:53,437 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 after 0ms 2024-12-08T07:58:53,438 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:53,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741838_1019 (size=1264) 2024-12-08T07:58:53,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741838_1019 (size=1264) 2024-12-08T07:58:53,439 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 is not closed yet, will try archiving it next time 2024-12-08T07:58:53,447 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733644712736/Put/vlen=218/seqid=0] 2024-12-08T07:58:53,447 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733644722469/Put/vlen=1045/seqid=0] 2024-12-08T07:58:53,447 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644711762 2024-12-08T07:58:53,447 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:53,447 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:53,448 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 after 1ms 2024-12-08T07:58:53,448 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:53,452 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733644725890/Put/vlen=1045/seqid=0] 2024-12-08T07:58:53,452 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733644727903/Put/vlen=1045/seqid=0] 2024-12-08T07:58:53,452 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 2024-12-08T07:58:53,452 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 2024-12-08T07:58:53,452 INFO [Time-limited test {}] 
util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 2024-12-08T07:58:53,453 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 after 1ms 2024-12-08T07:58:53,453 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644731409 2024-12-08T07:58:53,457 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733644731409/Put/vlen=1045/seqid=0] 2024-12-08T07:58:53,457 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:53,457 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:53,457 WARN [IPC Server handler 4 on default port 45485 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-08T07:58:53,457 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 after 0ms 2024-12-08T07:58:53,614 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-08T07:58:53,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:53,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:53,849 WARN [ResponseProcessor for block BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:53,849 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-300342028_22 at /127.0.0.1:44112 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44112 dst: /127.0.0.1:37851 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37851 remote=/127.0.0.1:44112]. Total timeout mills is 60000, 59584 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:58:53,849 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-300342028_22 at /127.0.0.1:34060 [Receiving block BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38357:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34060 dst: /127.0.0.1:38357 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T07:58:53,850 WARN [DataStreamer for file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 block BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37851,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK], DatanodeInfoWithStorage[127.0.0.1:38357,DS-bb5c09d6-5850-4601-95c1-c11917a34e29,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37851,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]) is bad. 2024-12-08T07:58:53,850 WARN [DataStreamer for file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 block BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:53,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741839_1022 (size=85) 2024-12-08T07:58:54,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:54,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:55,419 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644725891 after 4002ms 2024-12-08T07:58:55,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:55,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:58:56,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:56,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:57,458 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 after 4001ms 2024-12-08T07:58:57,458 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:57,464 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:57,465 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3da59edd4c184fada39db32f0a7e9e0c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-08T07:58:57,465 ERROR [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:58:57,466 WARN [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
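Note on the RemoteException above: the NameNode refused to bump the block's generation stamp because the last WAL block had already been moved to UNDER_RECOVERY (block recovery was in progress on it), and updateBlockForPipeline is only accepted while a block is still UNDER_CONSTRUCTION. A simplified, illustrative version of that server-side check is sketched below; it is not the actual FSNamesystem code, just the shape of the rule the exception message reports.

    import java.io.IOException;

    public final class UcBlockCheckSketch {
      enum BlockUCState { COMPLETE, UNDER_CONSTRUCTION, UNDER_RECOVERY, COMMITTED }

      /** Refuses generation-stamp bumps for blocks that are no longer under construction. */
      static void checkUCBlock(String block, BlockUCState state) throws IOException {
        if (state != BlockUCState.UNDER_CONSTRUCTION) {
          throw new IOException("Unexpected BlockUCState: " + block + " is " + state
              + " but not UNDER_CONSTRUCTION");
        }
      }

      public static void main(String[] args) throws IOException {
        // Throws, mirroring the refusal recorded in the log above.
        checkUCBlock("blk_1073741839_1021", BlockUCState.UNDER_RECOVERY);
      }
    }

As the next records show, the writer treats this as a failed append, the sync runners are interrupted, and the WAL is rolled onto a fresh file rather than continuing on the block under recovery.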
2024-12-08T07:58:57,466 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C40501%2C1733644711128:(num 1733644733423) roll requested 2024-12-08T07:58:57,466 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.1733644737466 2024-12-08T07:58:57,476 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 newFile=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644737466 2024-12-08T07:58:57,476 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,476 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,476 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,477 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,477 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644737466 2024-12-08T07:58:57,477 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:57,477 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-610407669-172.17.0.2-1733644708587:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:57,478 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:57,478 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 after 0ms 2024-12-08T07:58:57,487 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.1733644733423 to hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs/0106a245d0e8%2C40501%2C1733644711128.1733644733423 2024-12-08T07:58:57,487 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46359:46359),(127.0.0.1/127.0.0.1:34909:34909)] 2024-12-08T07:58:57,505 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/.tmp/info/e3ca8fd5eab24685b65bf293713e5538 is 1080, key is row1002/info:/1733644722469/Put/seqid=0 2024-12-08T07:58:57,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741841_1024 (size=9270) 2024-12-08T07:58:57,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741841_1024 (size=9270) 2024-12-08T07:58:57,510 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/.tmp/info/e3ca8fd5eab24685b65bf293713e5538 2024-12-08T07:58:57,516 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/.tmp/info/e3ca8fd5eab24685b65bf293713e5538 as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/info/e3ca8fd5eab24685b65bf293713e5538 2024-12-08T07:58:57,521 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/info/e3ca8fd5eab24685b65bf293713e5538, entries=4, sequenceid=8, filesize=9.1 K 2024-12-08T07:58:57,522 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 3da59edd4c184fada39db32f0a7e9e0c in 58ms, sequenceid=8, compaction requested=false 2024-12-08T07:58:57,523 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3da59edd4c184fada39db32f0a7e9e0c: 2024-12-08T07:58:57,523 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-08T07:58:57,523 ERROR [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:57,523 WARN [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42-prefix:0106a245d0e8,40501,1733644711128.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
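Note on "All datanodes ... are bad. Aborting": this is the terminal case of pipeline recovery, where every node that was in the write pipeline has failed and, under the client's datanode-replacement policy, there is nothing left to rebuild the pipeline from, so the stream aborts and the WAL must be rolled. That behaviour is governed by the standard dfs.client.block.write.replace-datanode-on-failure.* client settings; the sketch below shows how a small-cluster test might loosen them, purely as an illustration (nothing in this log indicates these exact values were used here).

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoveryConfSketch {
      static Configuration loosenReplacementPolicy() {
        Configuration conf = new Configuration();
        // With only two datanodes, never demand a replacement node when one fails...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // ...and if replacement is attempted anyway, continue writing on a best-effort basis.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }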
2024-12-08T07:58:57,523 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C40501%2C1733644711128.meta:.meta(num 1733644712267) roll requested 2024-12-08T07:58:57,524 INFO [regionserver/0106a245d0e8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40501%2C1733644711128.meta.1733644737524.meta 2024-12-08T07:58:57,529 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,529 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,529 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,529 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,529 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,529 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644737524.meta 2024-12-08T07:58:57,529 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:58:57,530 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
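Note: because the old writer cannot be closed cleanly (both the trailer write and the close fail on the dead pipeline), the roll falls back to NameNode-side lease recovery on the previous WAL file, which is what the RecoverLeaseFSUtils lines that follow are doing. Stripped of HBase's retry bookkeeping, the pattern reduces to recoverLease plus polling isFileClosed, the same calls visible in the stack traces; the timeout and polling interval below are arbitrary illustrations.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class WalLeaseRecoverySketch {
      /** Ask the NameNode to recover the lease, then poll until the file is closed. */
      static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean closed = dfs.recoverLease(wal);            // true if already closed or recovered immediately
        long deadline = System.currentTimeMillis() + 60_000L;
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1_000L);
          closed = dfs.isFileClosed(wal);                  // same call the later stack traces show
        }
        return closed;
      }
    }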
2024-12-08T07:58:57,530 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta 2024-12-08T07:58:57,530 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34909:34909),(127.0.0.1/127.0.0.1:46359:46359)] 2024-12-08T07:58:57,530 DEBUG [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta is not closed yet, will try archiving it next time 2024-12-08T07:58:57,530 WARN [IPC Server handler 0 on default port 45485 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-12-08T07:58:57,530 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta after 0ms 2024-12-08T07:58:57,551 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/info/ed171e0e28e04a2b83f48d6c4534f47f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c./info:regioninfo/1733644712740/Put/seqid=0 2024-12-08T07:58:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741843_1027 (size=7125) 2024-12-08T07:58:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741843_1027 (size=7125) 2024-12-08T07:58:57,559 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/info/ed171e0e28e04a2b83f48d6c4534f47f 2024-12-08T07:58:57,579 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/ns/b6ed8d7d6b7e4b38808475f3467c8e05 is 43, key is default/ns:d/1733644712343/Put/seqid=0 2024-12-08T07:58:57,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741844_1028 (size=5153) 2024-12-08T07:58:57,584 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/ns/b6ed8d7d6b7e4b38808475f3467c8e05 2024-12-08T07:58:57,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37851 is added to blk_1073741844_1028 (size=5153) 2024-12-08T07:58:57,604 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/table/53c26969507b42ffb5577fc2e33d2bdb is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733644712751/Put/seqid=0 2024-12-08T07:58:57,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741845_1029 (size=5438) 2024-12-08T07:58:57,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741845_1029 (size=5438) 2024-12-08T07:58:57,609 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/table/53c26969507b42ffb5577fc2e33d2bdb 2024-12-08T07:58:57,615 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/info/ed171e0e28e04a2b83f48d6c4534f47f as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/info/ed171e0e28e04a2b83f48d6c4534f47f 2024-12-08T07:58:57,620 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/info/ed171e0e28e04a2b83f48d6c4534f47f, entries=10, sequenceid=11, filesize=7.0 K 2024-12-08T07:58:57,621 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/ns/b6ed8d7d6b7e4b38808475f3467c8e05 as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/ns/b6ed8d7d6b7e4b38808475f3467c8e05 2024-12-08T07:58:57,628 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/ns/b6ed8d7d6b7e4b38808475f3467c8e05, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T07:58:57,629 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/.tmp/table/53c26969507b42ffb5577fc2e33d2bdb as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/table/53c26969507b42ffb5577fc2e33d2bdb 2024-12-08T07:58:57,635 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/table/53c26969507b42ffb5577fc2e33d2bdb, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T07:58:57,636 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false 2024-12-08T07:58:57,636 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 
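Note: the records above are a complete flush cycle for hbase:meta (region 1588230740): memstore content for the info, ns and table families is written to .tmp HFiles, each file is committed into its family directory, the stores pick them up, and the cycle ends with the flush status journal. From test code, an equivalent flush is typically forced through the Admin API, as sketched below; the Connection named conn is assumed to exist and is not shown in this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class FlushSketch {
      /** Forces flushes like the ones recorded above for the test table and hbase:meta. */
      static void flushBoth(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
          admin.flush(TableName.META_TABLE_NAME); // hbase:meta, i.e. region 1588230740 in the log
        }
      }
    }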
2024-12-08T07:58:57,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T07:58:57,642 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T07:58:57,642 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:57,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:57,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
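Note: the call stack above is the JUnit teardown path, with AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the HBase mini cluster and its supporting services. The shape of that teardown is roughly as follows; the TEST_UTIL field name is the usual HBase-test convention and is assumed here.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class MiniClusterTearDownSketch {
      // Shared test utility; the field name is conventional, not taken from this log.
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the connection, then shuts down master and region servers,
        // matching the "Shutting down minicluster" record above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }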
2024-12-08T07:58:57,642 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T07:58:57,642 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T07:58:57,642 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=274457869, stopped=false 2024-12-08T07:58:57,642 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,35031,1733644710922 2024-12-08T07:58:57,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:58:57,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:57,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T07:58:57,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:57,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:58:57,704 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:58:57,704 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T07:58:57,704 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:57,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:57,704 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:57,704 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:58:57,704 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,40501,1733644711128' ***** 2024-12-08T07:58:57,704 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T07:58:57,705 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T07:58:57,705 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T07:58:57,705 INFO [RS:0;0106a245d0e8:40501 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T07:58:57,705 INFO [RS:0;0106a245d0e8:40501 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T07:58:57,705 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(3091): Received CLOSE for 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:57,705 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,40501,1733644711128 2024-12-08T07:58:57,705 INFO [RS:0;0106a245d0e8:40501 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:58:57,706 INFO [RS:0;0106a245d0e8:40501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:40501. 
2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3da59edd4c184fada39db32f0a7e9e0c, disabling compactions & flushes 2024-12-08T07:58:57,706 DEBUG [RS:0;0106a245d0e8:40501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T07:58:57,706 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:57,706 DEBUG [RS:0;0106a245d0e8:40501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. after waiting 0 ms 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:57,706 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T07:58:57,706 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T07:58:57,706 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T07:58:57,706 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T07:58:57,706 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T07:58:57,706 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1325): Online Regions={3da59edd4c184fada39db32f0a7e9e0c=TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:58:57,706 DEBUG [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3da59edd4c184fada39db32f0a7e9e0c 2024-12-08T07:58:57,706 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:58:57,706 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:58:57,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:57,715 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/default/TestLogRolling-testLogRollOnPipelineRestart/3da59edd4c184fada39db32f0a7e9e0c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-08T07:58:57,716 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:57,716 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3da59edd4c184fada39db32f0a7e9e0c: Waiting for close lock at 1733644737705Running coprocessor pre-close hooks at 1733644737705Disabling compacts and flushes for region at 1733644737705Disabling writes for close at 1733644737706 (+1 ms)Writing region close event to WAL at 1733644737707 (+1 ms)Running coprocessor post-close hooks at 1733644737716 (+9 ms)Closed at 1733644737716 2024-12-08T07:58:57,716 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733644712379.3da59edd4c184fada39db32f0a7e9e0c. 2024-12-08T07:58:57,718 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T07:58:57,719 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:58:57,719 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:58:57,719 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644737706Running coprocessor pre-close hooks at 1733644737706Disabling compacts and flushes for region at 1733644737706Disabling writes for close at 1733644737706Writing region close event to WAL at 1733644737715 (+9 ms)Running coprocessor post-close hooks at 1733644737719 (+4 ms)Closed at 1733644737719 2024-12-08T07:58:57,719 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T07:58:57,907 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,40501,1733644711128; all regions closed. 
2024-12-08T07:58:57,907 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,908 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,908 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,908 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,908 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:58:57,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741842_1025 (size=825) 2024-12-08T07:58:57,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741842_1025 (size=825) 2024-12-08T07:58:58,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:58:58,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T07:58:58,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T07:58:58,620 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T07:58:58,620 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T07:58:58,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:58,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:59,621 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:58:59,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:59,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:58:59,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-08T07:59:00,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:00,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:00,906 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-08T07:59:01,531 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta after 4001ms 2024-12-08T07:59:01,532 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/WALs/0106a245d0e8,40501,1733644711128/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta to hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs/0106a245d0e8%2C40501%2C1733644711128.meta.1733644712267.meta 2024-12-08T07:59:01,537 DEBUG [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs 2024-12-08T07:59:01,537 INFO [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C40501%2C1733644711128.meta:.meta(num 1733644737524) 2024-12-08T07:59:01,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,538 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,538 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741840_1023 (size=1162) 2024-12-08T07:59:01,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741840_1023 (size=1162) 2024-12-08T07:59:01,546 DEBUG [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs 2024-12-08T07:59:01,546 INFO [RS:0;0106a245d0e8:40501 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C40501%2C1733644711128:(num 1733644737466) 2024-12-08T07:59:01,546 DEBUG [RS:0;0106a245d0e8:40501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:59:01,546 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T07:59:01,546 INFO [RS:0;0106a245d0e8:40501 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:59:01,547 INFO [RS:0;0106a245d0e8:40501 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T07:59:01,547 INFO [RS:0;0106a245d0e8:40501 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:59:01,547 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T07:59:01,547 INFO [RS:0;0106a245d0e8:40501 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40501 2024-12-08T07:59:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,40501,1733644711128 2024-12-08T07:59:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:59:01,593 INFO [RS:0;0106a245d0e8:40501 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:59:01,669 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,40501,1733644711128] 2024-12-08T07:59:01,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:01,714 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,40501,1733644711128 already deleted, retry=false 2024-12-08T07:59:01,714 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,40501,1733644711128 expired; onlineServers=0 2024-12-08T07:59:01,714 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,35031,1733644710922' ***** 2024-12-08T07:59:01,714 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T07:59:01,714 INFO [M:0;0106a245d0e8:35031 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T07:59:01,714 INFO [M:0;0106a245d0e8:35031 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T07:59:01,715 DEBUG [M:0;0106a245d0e8:35031 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T07:59:01,715 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-08T07:59:01,715 DEBUG [M:0;0106a245d0e8:35031 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T07:59:01,715 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644711536 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644711536,5,FailOnTimeoutGroup] 2024-12-08T07:59:01,715 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644711536 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644711536,5,FailOnTimeoutGroup] 2024-12-08T07:59:01,715 INFO [M:0;0106a245d0e8:35031 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T07:59:01,715 INFO [M:0;0106a245d0e8:35031 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T07:59:01,715 DEBUG [M:0;0106a245d0e8:35031 {}] master.HMaster(1795): Stopping service threads 2024-12-08T07:59:01,715 INFO [M:0;0106a245d0e8:35031 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T07:59:01,715 INFO [M:0;0106a245d0e8:35031 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T07:59:01,715 INFO [M:0;0106a245d0e8:35031 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T07:59:01,716 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T07:59:01,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:01,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T07:59:01,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:01,741 DEBUG [M:0;0106a245d0e8:35031 {}] zookeeper.ZKUtil(347): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T07:59:01,741 WARN [M:0;0106a245d0e8:35031 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T07:59:01,742 INFO [M:0;0106a245d0e8:35031 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/.lastflushedseqids 2024-12-08T07:59:01,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741846_1030 (size=111) 2024-12-08T07:59:01,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741846_1030 (size=111) 2024-12-08T07:59:01,751 INFO [M:0;0106a245d0e8:35031 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T07:59:01,751 INFO [M:0;0106a245d0e8:35031 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', 
STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T07:59:01,751 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:59:01,751 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:59:01,751 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:59:01,751 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:59:01,751 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:59:01,752 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-08T07:59:01,752 ERROR [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData-prefix:0106a245d0e8,35031,1733644710922 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:59:01,752 WARN [FSHLog-0-hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData-prefix:0106a245d0e8,35031,1733644710922 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T07:59:01,752 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0106a245d0e8%2C35031%2C1733644710922:(num 1733644711278) roll requested 2024-12-08T07:59:01,753 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C35031%2C1733644710922.1733644741752 2024-12-08T07:59:01,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,759 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644741752 2024-12-08T07:59:01,765 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:59:01,765 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-5d557bf9-4278-493d-a6cf-d2b9043b3d49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T07:59:01,765 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 2024-12-08T07:59:01,765 WARN [IPC Server handler 3 on default port 45485 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-08T07:59:01,765 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 after 0ms 2024-12-08T07:59:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:59:01,769 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46359:46359),(127.0.0.1/127.0.0.1:34909:34909)] 2024-12-08T07:59:01,769 INFO [RS:0;0106a245d0e8:40501 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:59:01,769 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 is not closed yet, will try archiving it next time 2024-12-08T07:59:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40501-0x100046efac30001, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:59:01,769 INFO [RS:0;0106a245d0e8:40501 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,40501,1733644711128; zookeeper connection closed. 2024-12-08T07:59:01,769 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@614e95fa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@614e95fa 2024-12-08T07:59:01,769 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T07:59:01,783 DEBUG [M:0;0106a245d0e8:35031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/33882af1817c41ad8fdf608701613295 is 82, key is hbase:meta,,1/info:regioninfo/1733644712295/Put/seqid=0 2024-12-08T07:59:01,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741848_1033 (size=5672) 2024-12-08T07:59:01,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741848_1033 (size=5672) 2024-12-08T07:59:01,788 INFO [M:0;0106a245d0e8:35031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/33882af1817c41ad8fdf608701613295 2024-12-08T07:59:01,806 DEBUG [M:0;0106a245d0e8:35031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00418156232b49bb86c3f066e0348afd is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733644712756/Put/seqid=0 2024-12-08T07:59:01,811 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741849_1034 (size=6118) 2024-12-08T07:59:01,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741849_1034 (size=6118) 2024-12-08T07:59:01,812 INFO [M:0;0106a245d0e8:35031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00418156232b49bb86c3f066e0348afd 2024-12-08T07:59:01,834 DEBUG [M:0;0106a245d0e8:35031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e0b25011a014c6e9f2231e188f423e0 is 69, key is 0106a245d0e8,40501,1733644711128/rs:state/1733644711597/Put/seqid=0 2024-12-08T07:59:01,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741850_1035 (size=5156) 2024-12-08T07:59:01,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741850_1035 (size=5156) 2024-12-08T07:59:01,839 INFO [M:0;0106a245d0e8:35031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e0b25011a014c6e9f2231e188f423e0 2024-12-08T07:59:01,861 DEBUG [M:0;0106a245d0e8:35031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bdf82b6425eb43cb9a339ce01a1db1f8 is 52, key is load_balancer_on/state:d/1733644712374/Put/seqid=0 2024-12-08T07:59:01,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741851_1036 (size=5056) 2024-12-08T07:59:01,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741851_1036 (size=5056) 2024-12-08T07:59:01,866 INFO [M:0;0106a245d0e8:35031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bdf82b6425eb43cb9a339ce01a1db1f8 2024-12-08T07:59:01,871 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/33882af1817c41ad8fdf608701613295 as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/33882af1817c41ad8fdf608701613295 2024-12-08T07:59:01,878 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/33882af1817c41ad8fdf608701613295, entries=8, sequenceid=56, filesize=5.5 K 2024-12-08T07:59:01,879 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00418156232b49bb86c3f066e0348afd as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00418156232b49bb86c3f066e0348afd 2024-12-08T07:59:01,885 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00418156232b49bb86c3f066e0348afd, entries=6, sequenceid=56, filesize=6.0 K 2024-12-08T07:59:01,886 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e0b25011a014c6e9f2231e188f423e0 as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e0b25011a014c6e9f2231e188f423e0 2024-12-08T07:59:01,892 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e0b25011a014c6e9f2231e188f423e0, entries=1, sequenceid=56, filesize=5.0 K 2024-12-08T07:59:01,893 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bdf82b6425eb43cb9a339ce01a1db1f8 as hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bdf82b6425eb43cb9a339ce01a1db1f8 2024-12-08T07:59:01,898 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bdf82b6425eb43cb9a339ce01a1db1f8, entries=1, sequenceid=56, filesize=4.9 K 2024-12-08T07:59:01,899 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=56, compaction requested=false 2024-12-08T07:59:01,901 INFO [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T07:59:01,901 DEBUG [M:0;0106a245d0e8:35031 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644741751Disabling compacts and flushes for region at 1733644741751Disabling writes for close at 1733644741751Obtaining lock to block concurrent updates at 1733644741752 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644741752Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733644741752Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733644741770 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644741770Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644741783 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644741783Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644741793 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644741806 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644741806Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644741817 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644741833 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644741833Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644741844 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644741860 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644741860Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c3f15f0: reopening flushed file at 1733644741871 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ca75063: reopening flushed file at 1733644741878 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@99fbba4: reopening flushed file at 1733644741885 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34c99903: reopening flushed file at 1733644741892 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=56, compaction requested=false at 1733644741899 (+7 ms)Writing region close event to WAL at 1733644741900 (+1 ms)Closed at 1733644741901 (+1 ms) 2024-12-08T07:59:01,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,901 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,901 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T07:59:01,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38357 is added to blk_1073741847_1031 (size=757) 2024-12-08T07:59:01,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741847_1031 (size=757) 2024-12-08T07:59:02,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:02,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:02,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:02,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,248 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T07:59:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:03,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:03,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:04,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:04,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:05,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:05,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:05,766 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 after 4001ms 2024-12-08T07:59:05,767 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/WALs/0106a245d0e8,35031,1733644710922/0106a245d0e8%2C35031%2C1733644710922.1733644711278 to hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/oldWALs/0106a245d0e8%2C35031%2C1733644710922.1733644711278 2024-12-08T07:59:05,770 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/MasterData/oldWALs/0106a245d0e8%2C35031%2C1733644710922.1733644711278 to hdfs://localhost:45485/user/jenkins/test-data/e672819b-0be6-16dd-26ec-2ece7e291c42/oldWALs/0106a245d0e8%2C35031%2C1733644710922.1733644711278$masterlocalwal$ 2024-12-08T07:59:05,770 INFO [M:0;0106a245d0e8:35031 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T07:59:05,770 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T07:59:05,770 INFO [M:0;0106a245d0e8:35031 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35031 2024-12-08T07:59:05,770 INFO [M:0;0106a245d0e8:35031 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T07:59:05,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
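The repeated Close-WAL-Writer WARNs above come from a once-per-second polling loop: lease recovery is requested on the old WAL file, then isFileClosed is probed until it reports true, and each probe fails here with "Filesystem closed" because the DFSClient was already shut down during minicluster teardown. The snippet below is a minimal sketch of that polling pattern using only the two public HDFS calls visible in the traces (DistributedFileSystem.recoverLease and isFileClosed); it is not the HBase RecoverLeaseFSUtils code, which reaches isFileClosed via reflection, and the method and class names are illustrative only.

```java
// Minimal sketch of the polling pattern behind the WARNs above; not the HBase
// RecoverLeaseFSUtils implementation, just an illustration of the retry loop.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryProbe {

  /** Ask the NameNode to recover the lease on a WAL file, then poll until it is closed. */
  public static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    // recoverLease() returns true if the file is already closed.
    boolean closed = dfs.recoverLease(wal);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!closed && System.currentTimeMillis() < deadline) {
      // If the underlying DFSClient has been shut down (as during minicluster
      // teardown), this call throws "java.io.IOException: Filesystem closed".
      closed = dfs.isFileClosed(wal);
      if (!closed) {
        Thread.sleep(1000L); // the log shows retries roughly one second apart
      }
    }
    return closed;
  }

  private LeaseRecoveryProbe() {
  }
}
```

The retry cadence in the log (07:59:02, 07:59:03, 07:59:04, 07:59:05) matches a loop of this shape; the exception is presumably treated as a transient failure, which is why the same trace keeps reappearing until the INFO "Recovered lease" line below.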
2024-12-08T07:59:05,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:59:05,925 INFO [M:0;0106a245d0e8:35031 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T07:59:05,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35031-0x100046efac30000, quorum=127.0.0.1:51084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T07:59:05,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e666703{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:59:05,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2dd41fe4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:59:05,928 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:59:05,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66269315{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:59:05,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7202b0aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:59:05,930 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:59:05,930 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:59:05,930 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:59:05,930 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-610407669-172.17.0.2-1733644708587 (Datanode Uuid cc6fc935-c807-410e-9199-7ae51bb9da38) service to localhost/127.0.0.1:45485 2024-12-08T07:59:05,931 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data3/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:59:05,931 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data4/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:59:05,931 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:59:05,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@647c40ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:59:05,934 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38ca15c7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:59:05,934 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:59:05,935 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a15b38f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:59:05,935 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58c81822{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:59:05,936 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T07:59:05,936 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T07:59:05,936 WARN [BP-610407669-172.17.0.2-1733644708587 heartbeating to localhost/127.0.0.1:45485 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-610407669-172.17.0.2-1733644708587 (Datanode Uuid 86420e16-6872-4605-8a97-c126c7de6b47) service to localhost/127.0.0.1:45485 2024-12-08T07:59:05,936 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T07:59:05,937 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data1/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:59:05,937 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/cluster_c806b1b6-4479-4c06-7f25-b09e8cb54243/data/data2/current/BP-610407669-172.17.0.2-1733644708587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T07:59:05,937 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T07:59:05,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@482b646b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:59:05,943 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c7a3196{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T07:59:05,943 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T07:59:05,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42369482{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T07:59:05,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37ba1ac4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir/,STOPPED} 2024-12-08T07:59:05,948 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T07:59:05,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T07:59:05,975 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45485 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45485 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45485 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45485 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45485 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45485 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45485 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45485 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=203 (was 191) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8267 (was 8441) 2024-12-08T07:59:05,982 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=203, ProcessCount=11, AvailableMemoryMB=8267 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.log.dir so I do NOT create it in target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f35a735c-711a-2e35-1b8f-223288149e7a/hadoop.tmp.dir so I do NOT create it in target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8, deleteOnExit=true 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/test.cache.data in system properties and HBase conf 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T07:59:05,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir in system properties and HBase conf 2024-12-08T07:59:05,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T07:59:05,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T07:59:05,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T07:59:05,984 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T07:59:05,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:59:05,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T07:59:05,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/nfs.dump.dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/java.io.tmpdir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T07:59:05,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T07:59:05,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T07:59:06,001 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:59:06,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:59:06,340 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:59:06,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:59:06,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:59:06,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:59:06,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:59:06,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e470e04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:59:06,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4732430a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:59:06,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@206f042f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/java.io.tmpdir/jetty-localhost-44923-hadoop-hdfs-3_4_1-tests_jar-_-any-12915807200298776257/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T07:59:06,442 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e21aaf2{HTTP/1.1, (http/1.1)}{localhost:44923} 2024-12-08T07:59:06,442 INFO [Time-limited test {}] server.Server(415): Started @194010ms 2024-12-08T07:59:06,453 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T07:59:06,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:59:06,692 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:59:06,693 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:59:06,693 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:59:06,693 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T07:59:06,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@443c49f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:59:06,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77f859f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:59:06,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-12-08T07:59:06,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:06,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5570a111{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/java.io.tmpdir/jetty-localhost-33571-hadoop-hdfs-3_4_1-tests_jar-_-any-16460131603932114539/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:59:06,801 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61fcc471{HTTP/1.1, (http/1.1)}{localhost:33571} 2024-12-08T07:59:06,801 INFO [Time-limited test {}] server.Server(415): Started @194368ms 2024-12-08T07:59:06,802 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:59:06,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T07:59:06,834 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T07:59:06,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T07:59:06,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T07:59:06,834 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T07:59:06,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26731840{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir/,AVAILABLE} 2024-12-08T07:59:06,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bff2115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T07:59:06,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b173219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/java.io.tmpdir/jetty-localhost-33285-hadoop-hdfs-3_4_1-tests_jar-_-any-14718655255259661545/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T07:59:06,936 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c1a9f69{HTTP/1.1, (http/1.1)}{localhost:33285} 2024-12-08T07:59:06,936 INFO [Time-limited test {}] server.Server(415): Started @194504ms 2024-12-08T07:59:06,937 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T07:59:07,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:07,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:08,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T07:59:08,220 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data1/current/BP-1167299508-172.17.0.2-1733644746014/current, will proceed with Du for space computation calculation, 2024-12-08T07:59:08,220 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data2/current/BP-1167299508-172.17.0.2-1733644746014/current, will proceed with Du for space computation calculation, 2024-12-08T07:59:08,241 WARN [Thread-1613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T07:59:08,243 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22b2658b44fd96df with lease ID 0x62fc307da52a836b: Processing first storage report for DS-bfdbb3d9-ed35-448f-8c2d-356f41415792 from datanode DatanodeRegistration(127.0.0.1:37527, datanodeUuid=17d897d4-f2d3-49fe-8b99-2ef0e5530908, infoPort=33665, infoSecurePort=0, ipcPort=41289, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014) 2024-12-08T07:59:08,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22b2658b44fd96df with lease ID 0x62fc307da52a836b: from storage DS-bfdbb3d9-ed35-448f-8c2d-356f41415792 node DatanodeRegistration(127.0.0.1:37527, datanodeUuid=17d897d4-f2d3-49fe-8b99-2ef0e5530908, infoPort=33665, infoSecurePort=0, ipcPort=41289, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:59:08,243 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22b2658b44fd96df with lease ID 0x62fc307da52a836b: Processing first storage report for DS-dbc552ad-8b93-45ec-9dce-7e1b0bd90a60 from datanode DatanodeRegistration(127.0.0.1:37527, datanodeUuid=17d897d4-f2d3-49fe-8b99-2ef0e5530908, infoPort=33665, infoSecurePort=0, ipcPort=41289, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014) 2024-12-08T07:59:08,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22b2658b44fd96df with lease ID 0x62fc307da52a836b: from storage DS-dbc552ad-8b93-45ec-9dce-7e1b0bd90a60 node DatanodeRegistration(127.0.0.1:37527, datanodeUuid=17d897d4-f2d3-49fe-8b99-2ef0e5530908, infoPort=33665, infoSecurePort=0, ipcPort=41289, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:59:08,359 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data4/current/BP-1167299508-172.17.0.2-1733644746014/current, will proceed with Du for space computation calculation, 
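The "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from a reflective probe: the stack trace shows RecoverLeaseFSUtils.isFileClosed invoking DistributedFileSystem.isFileClosed through java.lang.reflect.Method, so any failure surfaces as an InvocationTargetException wrapping the real cause (here, a DFSClient that has already been shut down). The sketch below only illustrates that failure shape; it is not HBase's actual RecoverLeaseFSUtils code, and the helper name and the fallback behaviour are assumptions.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical helper, for illustration only: probe isFileClosed() reflectively so the
// code also works against FileSystem implementations that do not expose the method.
final class IsFileClosedProbe {
  static boolean isFileClosed(Object fs, Object path) throws IOException {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", path.getClass());
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // this FileSystem implementation does not expose isFileClosed()
    } catch (InvocationTargetException e) {
      // This is the failure shape logged above: the meaningful error is the cause,
      // e.g. IOException("Filesystem closed") thrown by an already-closed DFSClient.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}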
2024-12-08T07:59:08,359 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data3/current/BP-1167299508-172.17.0.2-1733644746014/current, will proceed with Du for space computation calculation, 2024-12-08T07:59:08,374 WARN [Thread-1636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T07:59:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6633aaba1f1cf064 with lease ID 0x62fc307da52a836c: Processing first storage report for DS-dd56c0cf-114c-421f-8ce3-57f3ed4d9a1f from datanode DatanodeRegistration(127.0.0.1:42175, datanodeUuid=ccec0154-d0e2-4e21-9d64-62b551b2ac39, infoPort=38711, infoSecurePort=0, ipcPort=37581, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014) 2024-12-08T07:59:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6633aaba1f1cf064 with lease ID 0x62fc307da52a836c: from storage DS-dd56c0cf-114c-421f-8ce3-57f3ed4d9a1f node DatanodeRegistration(127.0.0.1:42175, datanodeUuid=ccec0154-d0e2-4e21-9d64-62b551b2ac39, infoPort=38711, infoSecurePort=0, ipcPort=37581, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:59:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6633aaba1f1cf064 with lease ID 0x62fc307da52a836c: Processing first storage report for DS-f106133c-3301-4153-a459-203770395d22 from datanode DatanodeRegistration(127.0.0.1:42175, datanodeUuid=ccec0154-d0e2-4e21-9d64-62b551b2ac39, infoPort=38711, infoSecurePort=0, ipcPort=37581, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014) 2024-12-08T07:59:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6633aaba1f1cf064 with lease ID 0x62fc307da52a836c: from storage DS-f106133c-3301-4153-a459-203770395d22 node DatanodeRegistration(127.0.0.1:42175, datanodeUuid=ccec0154-d0e2-4e21-9d64-62b551b2ac39, infoPort=38711, infoSecurePort=0, ipcPort=37581, storageInfo=lv=-57;cid=testClusterID;nsid=1648399136;c=1733644746014), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T07:59:08,468 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846 2024-12-08T07:59:08,470 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/zookeeper_0, clientPort=63530, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/zookeeper_0/version-2, dataDirSize=457 
dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T07:59:08,471 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63530 2024-12-08T07:59:08,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:59:08,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741825_1001 (size=7) 2024-12-08T07:59:08,483 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163 with version=8 2024-12-08T07:59:08,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging 2024-12-08T07:59:08,485 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T07:59:08,485 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:59:08,486 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38765 2024-12-08T07:59:08,487 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=master:38765 connecting to ZooKeeper ensemble=127.0.0.1:63530 2024-12-08T07:59:08,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387650x0, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:59:08,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38765-0x100046f8d7d0000 connected 2024-12-08T07:59:08,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:59:08,655 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163, hbase.cluster.distributed=false 2024-12-08T07:59:08,657 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:59:08,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38765 2024-12-08T07:59:08,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38765 2024-12-08T07:59:08,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38765 2024-12-08T07:59:08,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38765 2024-12-08T07:59:08,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38765 2024-12-08T07:59:08,675 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T07:59:08,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:59:08,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T07:59:08,675 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T07:59:08,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T07:59:08,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=1 2024-12-08T07:59:08,675 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T07:59:08,676 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T07:59:08,676 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37287 2024-12-08T07:59:08,677 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37287 connecting to ZooKeeper ensemble=127.0.0.1:63530 2024-12-08T07:59:08,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,679 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372870x0, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T07:59:08,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:372870x0, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T07:59:08,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37287-0x100046f8d7d0001 connected 2024-12-08T07:59:08,693 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T07:59:08,694 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T07:59:08,694 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T07:59:08,695 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T07:59:08,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37287 2024-12-08T07:59:08,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37287 2024-12-08T07:59:08,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37287 2024-12-08T07:59:08,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37287 2024-12-08T07:59:08,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37287 2024-12-08T07:59:08,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:08,710 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:38765 2024-12-08T07:59:08,711 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,38765,1733644748484 2024-12-08T07:59:08,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:59:08,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:59:08,719 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0106a245d0e8,38765,1733644748484 2024-12-08T07:59:08,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:08,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T07:59:08,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,730 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T07:59:08,731 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,38765,1733644748484 from backup master directory 2024-12-08T07:59:08,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0106a245d0e8,38765,1733644748484 2024-12-08T07:59:08,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:59:08,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T07:59:08,740 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
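The ZKWatcher/ZKUtil lines above repeatedly report "Set watcher on znode that does not yet exist": in ZooKeeper, an exists() call registers a watch even when the node is absent, which is why the later NodeCreated event for /hbase/master is still delivered to the watcher. The following sketch uses the plain org.apache.zookeeper client API rather than HBase's ZKWatcher/ZKUtil wrappers; the quorum address is copied from the log, everything else is illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch: watch a znode that may not exist yet (plain ZooKeeper API).
public class ZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    // Quorum taken from the log lines above; session timeout is an arbitrary example value.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63530", 30000, watcher);
    // exists() returns null for a missing node but still registers the watch,
    // so a later NodeCreated event for /hbase/master reaches the watcher above.
    System.out.println("exists now? " + (zk.exists("/hbase/master", true) != null));
    // A real client would keep running to receive the event and close zk when done.
  }
}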
2024-12-08T07:59:08,740 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,38765,1733644748484 2024-12-08T07:59:08,744 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/hbase.id] with ID: d8f74ea2-8a98-4e0d-a1af-b9ef2fef4517 2024-12-08T07:59:08,744 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/.tmp/hbase.id 2024-12-08T07:59:08,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:59:08,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741826_1002 (size=42) 2024-12-08T07:59:08,752 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/.tmp/hbase.id]:[hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/hbase.id] 2024-12-08T07:59:08,765 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:08,765 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T07:59:08,766 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
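The cluster ID lines above show a write-to-temp-then-rename pattern: the new ID is first written to .tmp/hbase.id and only then moved to its final location, so readers never observe a partially written file. The sketch below illustrates that pattern with the public Hadoop FileSystem API; it is not HBase's FSUtils implementation, and the paths and ID value are simply copied from the log (in the test these live on the hdfs://localhost:35287 mini-cluster, whereas a default Configuration here would resolve to the local filesystem).

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of write-temp-then-rename for a small marker file.
public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {        // write to a temp location first
      out.write("d8f74ea2-8a98-4e0d-a1af-b9ef2fef4517".getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                                  // then move it into place
      throw new IllegalStateException("could not move " + tmp + " to " + dst);
    }
  }
}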
2024-12-08T07:59:08,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:59:08,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741827_1003 (size=196) 2024-12-08T07:59:08,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:59:08,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T07:59:08,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:59:08,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:59:08,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741828_1004 (size=1189) 2024-12-08T07:59:08,794 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store 2024-12-08T07:59:08,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:59:08,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741829_1005 (size=34) 2024-12-08T07:59:08,801 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:59:08,801 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T07:59:08,801 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:59:08,801 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:59:08,801 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T07:59:08,801 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T07:59:08,801 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
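The 'master:store' descriptor printed above lists four column families (info, proc, rs, state) with per-family settings such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE. For readers less used to that notation, the sketch below expresses the 'info' and 'proc' settings with HBase's public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; it is only a rough equivalent for illustration, since the master store region is created internally by MasterRegion rather than by client code like this.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: the 'info' and 'proc' family settings from the descriptor above.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)                              // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
  }
}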
2024-12-08T07:59:08,802 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644748801Disabling compacts and flushes for region at 1733644748801Disabling writes for close at 1733644748801Writing region close event to WAL at 1733644748801Closed at 1733644748801 2024-12-08T07:59:08,802 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/.initializing 2024-12-08T07:59:08,802 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/WALs/0106a245d0e8,38765,1733644748484 2024-12-08T07:59:08,805 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C38765%2C1733644748484, suffix=, logDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/WALs/0106a245d0e8,38765,1733644748484, archiveDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/oldWALs, maxLogs=10 2024-12-08T07:59:08,805 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C38765%2C1733644748484.1733644748805 2024-12-08T07:59:08,809 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/WALs/0106a245d0e8,38765,1733644748484/0106a245d0e8%2C38765%2C1733644748484.1733644748805 2024-12-08T07:59:08,810 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38711:38711),(127.0.0.1/127.0.0.1:33665:33665)] 2024-12-08T07:59:08,811 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:59:08,811 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:59:08,811 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,811 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T07:59:08,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:08,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:08,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T07:59:08,815 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:08,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:59:08,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T07:59:08,817 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:08,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:59:08,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T07:59:08,819 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:08,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:59:08,819 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,820 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,820 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,822 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,822 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,823 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T07:59:08,824 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T07:59:08,826 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:59:08,826 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806386, jitterRate=0.02537401020526886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T07:59:08,827 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644748811Initializing all the Stores at 1733644748812 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644748812Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644748812Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644748812Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644748812Cleaning up temporary data from old regions at 1733644748822 (+10 ms)Region opened successfully at 1733644748827 (+5 ms) 2024-12-08T07:59:08,827 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T07:59:08,831 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f7c7225, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:59:08,832 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T07:59:08,832 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T07:59:08,832 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T07:59:08,832 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T07:59:08,833 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T07:59:08,833 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T07:59:08,833 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T07:59:08,835 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T07:59:08,836 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T07:59:08,845 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T07:59:08,846 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T07:59:08,847 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T07:59:08,856 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T07:59:08,856 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T07:59:08,858 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T07:59:08,866 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T07:59:08,868 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T07:59:08,877 DEBUG 
[master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T07:59:08,880 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T07:59:08,892 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T07:59:08,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:59:08,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T07:59:08,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,903 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,38765,1733644748484, sessionid=0x100046f8d7d0000, setting cluster-up flag (Was=false) 2024-12-08T07:59:08,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,956 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T07:59:08,957 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,38765,1733644748484 2024-12-08T07:59:08,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:08,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:09,008 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T07:59:09,009 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,38765,1733644748484 2024-12-08T07:59:09,010 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T07:59:09,012 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T07:59:09,012 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T07:59:09,012 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T07:59:09,012 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,38765,1733644748484 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:59:09,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644779015 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T07:59:09,015 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T07:59:09,016 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:59:09,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T07:59:09,016 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T07:59:09,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T07:59:09,016 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644749016,5,FailOnTimeoutGroup] 2024-12-08T07:59:09,016 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644749016,5,FailOnTimeoutGroup] 2024-12-08T07:59:09,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T07:59:09,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,016 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,017 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,017 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T07:59:09,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:59:09,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741831_1007 (size=1321) 2024-12-08T07:59:09,029 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T07:59:09,029 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163 2024-12-08T07:59:09,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:59:09,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741832_1008 (size=32) 2024-12-08T07:59:09,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:59:09,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:59:09,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:59:09,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:59:09,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:59:09,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:59:09,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:59:09,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:59:09,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:59:09,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:59:09,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740 2024-12-08T07:59:09,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740 2024-12-08T07:59:09,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:59:09,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:59:09,052 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T07:59:09,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:59:09,055 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:59:09,056 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877305, jitterRate=0.11555148661136627}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:59:09,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644749042Initializing all the Stores at 1733644749043 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644749043Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644749043Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644749043Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644749043Cleaning up temporary data from old regions at 1733644749052 (+9 ms)Region opened successfully at 1733644749056 (+4 ms) 2024-12-08T07:59:09,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T07:59:09,056 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T07:59:09,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T07:59:09,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T07:59:09,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T07:59:09,057 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T07:59:09,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644749056Disabling compacts and flushes for region at 1733644749056Disabling writes for close at 1733644749056Writing region close 
event to WAL at 1733644749057 (+1 ms)Closed at 1733644749057 2024-12-08T07:59:09,058 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:59:09,058 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T07:59:09,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T07:59:09,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:59:09,060 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T07:59:09,098 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(746): ClusterId : d8f74ea2-8a98-4e0d-a1af-b9ef2fef4517 2024-12-08T07:59:09,098 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T07:59:09,147 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T07:59:09,147 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T07:59:09,162 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T07:59:09,162 DEBUG [RS:0;0106a245d0e8:37287 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fd392b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T07:59:09,176 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:37287 2024-12-08T07:59:09,176 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T07:59:09,176 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T07:59:09,176 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T07:59:09,177 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,38765,1733644748484 with port=37287, startcode=1733644748675 2024-12-08T07:59:09,177 DEBUG [RS:0;0106a245d0e8:37287 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T07:59:09,179 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50211, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T07:59:09,179 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38765 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,179 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38765 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,181 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163 2024-12-08T07:59:09,181 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35287 2024-12-08T07:59:09,181 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T07:59:09,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T07:59:09,193 DEBUG [RS:0;0106a245d0e8:37287 {}] zookeeper.ZKUtil(111): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,193 WARN [RS:0;0106a245d0e8:37287 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T07:59:09,193 INFO [RS:0;0106a245d0e8:37287 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:59:09,193 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,194 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,37287,1733644748675] 2024-12-08T07:59:09,196 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T07:59:09,198 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T07:59:09,199 INFO [RS:0;0106a245d0e8:37287 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T07:59:09,199 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T07:59:09,199 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T07:59:09,200 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T07:59:09,200 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,200 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,201 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T07:59:09,201 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:59:09,201 DEBUG [RS:0;0106a245d0e8:37287 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T07:59:09,201 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T07:59:09,201 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,201 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,201 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,201 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,201 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,37287,1733644748675-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T07:59:09,211 WARN [0106a245d0e8:38765 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-08T07:59:09,219 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T07:59:09,219 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,37287,1733644748675-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,219 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,219 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.Replication(171): 0106a245d0e8,37287,1733644748675 started
2024-12-08T07:59:09,232 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:09,232 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,37287,1733644748675, RpcServer on 0106a245d0e8/172.17.0.2:37287, sessionid=0x100046f8d7d0001
2024-12-08T07:59:09,232 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T07:59:09,232 DEBUG [RS:0;0106a245d0e8:37287 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,37287,1733644748675
2024-12-08T07:59:09,232 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,37287,1733644748675'
2024-12-08T07:59:09,232 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T07:59:09,233 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T07:59:09,233 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T07:59:09,233 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T07:59:09,233 DEBUG [RS:0;0106a245d0e8:37287 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,37287,1733644748675
2024-12-08T07:59:09,233 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,37287,1733644748675'
2024-12-08T07:59:09,234 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T07:59:09,234 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T07:59:09,234 DEBUG [RS:0;0106a245d0e8:37287 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T07:59:09,234 INFO [RS:0;0106a245d0e8:37287 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T07:59:09,234 INFO [RS:0;0106a245d0e8:37287 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T07:59:09,337 INFO [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C37287%2C1733644748675, suffix=, logDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675, archiveDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/oldWALs, maxLogs=32 2024-12-08T07:59:09,337 INFO [RS:0;0106a245d0e8:37287 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37287%2C1733644748675.1733644749337 2024-12-08T07:59:09,345 INFO [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644749337 2024-12-08T07:59:09,346 DEBUG [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33665:33665),(127.0.0.1/127.0.0.1:38711:38711)] 2024-12-08T07:59:09,461 DEBUG [0106a245d0e8:38765 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T07:59:09,462 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,463 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,37287,1733644748675, state=OPENING 2024-12-08T07:59:09,530 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T07:59:09,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:09,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T07:59:09,541 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T07:59:09,541 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:59:09,541 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:59:09,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,37287,1733644748675}] 2024-12-08T07:59:09,694 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T07:59:09,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54003, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T07:59:09,700 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T07:59:09,700 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T07:59:09,702 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C37287%2C1733644748675.meta, suffix=.meta, logDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675, archiveDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/oldWALs, maxLogs=32 2024-12-08T07:59:09,703 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37287%2C1733644748675.meta.1733644749702.meta 2024-12-08T07:59:09,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:09,711 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.meta.1733644749702.meta 2024-12-08T07:59:09,712 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38711:38711),(127.0.0.1/127.0.0.1:33665:33665)] 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T07:59:09,713 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T07:59:09,713 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T07:59:09,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T07:59:09,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T07:59:09,716 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T07:59:09,717 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T07:59:09,717 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T07:59:09,719 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T07:59:09,719 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,719 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
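
The StoreOpener entries above record, for each hbase:meta column family (info, ns, rep_barrier, table), the effective cache configuration, the ROW_INDEX_V1 block encoding and the compaction parameters. As a hedged illustration only, not code from this test run, the same per-family settings are normally declared through the public ColumnFamilyDescriptorBuilder API; the values below simply mirror what the log reports for the 'info' family:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyConfigSketch {
      public static void main(String[] args) {
        // Illustrative sketch: mirrors the settings the StoreOpener/HStore log
        // entries report for the hbase:meta 'info' family.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                            // VERSIONS => '3'
            .setInMemory(true)                            // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)         // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)                       // BLOCKSIZE => '8192 B (8KB)'
            .setBlockCacheEnabled(true)                   // cacheDataOnRead=true
            .setCacheDataOnWrite(false)
            .setCacheIndexesOnWrite(false)
            .setCacheBloomsOnWrite(false)
            .setEvictBlocksOnClose(false)
            .setPrefetchBlocksOnOpen(false)
            .build();
        System.out.println(info);
      }
    }
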
2024-12-08T07:59:09,719 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T07:59:09,720 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T07:59:09,720 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T07:59:09,721 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T07:59:09,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:09,721 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740 2024-12-08T07:59:09,722 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740 2024-12-08T07:59:09,723 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T07:59:09,723 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T07:59:09,724 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T07:59:09,725 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T07:59:09,726 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711350, jitterRate=-0.09547276794910431}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T07:59:09,726 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T07:59:09,726 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644749714Writing region info on filesystem at 1733644749714Initializing all the Stores at 1733644749714Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644749714Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 
at 1733644749715 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644749715Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644749715Cleaning up temporary data from old regions at 1733644749723 (+8 ms)Running coprocessor post-open hooks at 1733644749726 (+3 ms)Region opened successfully at 1733644749726 2024-12-08T07:59:09,727 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644749694 2024-12-08T07:59:09,729 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T07:59:09,729 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T07:59:09,730 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,731 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,37287,1733644748675, state=OPEN 2024-12-08T07:59:09,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:59:09,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T07:59:09,780 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,37287,1733644748675 2024-12-08T07:59:09,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:59:09,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T07:59:09,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T07:59:09,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,37287,1733644748675 in 239 msec 2024-12-08T07:59:09,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, 
resume processing ppid=1 2024-12-08T07:59:09,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 725 msec 2024-12-08T07:59:09,787 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T07:59:09,787 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T07:59:09,789 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:59:09,789 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,37287,1733644748675, seqNum=-1] 2024-12-08T07:59:09,789 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:59:09,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50577, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:59:09,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 783 msec 2024-12-08T07:59:09,797 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644749797, completionTime=-1 2024-12-08T07:59:09,797 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T07:59:09,797 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644809799 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733644869799 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38765,1733644748484-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38765,1733644748484-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,799 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38765,1733644748484-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
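
The recurring Close-WAL-Writer-0 warnings in this log ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from RecoverLeaseFSUtils reflectively calling DFSClient.isFileClosed for WAL files of an earlier mini-cluster whose DistributedFileSystem has already been shut down, so every retry fails the same way. A minimal, hypothetical sketch of the public HDFS calls behind that retry loop (the URI, path and timeout are placeholders, not values from this run):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Hypothetical helper: asks the NameNode to recover the lease on a WAL file
      // and then polls until the file is reported closed or the timeout expires.
      static boolean recoverAndWait(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean recovered = dfs.recoverLease(wal);   // starts lease recovery on the NameNode
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000);
          // This is the call the stack traces above show failing with
          // "Filesystem closed" once the underlying DFSClient is shut down.
          recovered = dfs.isFileClosed(wal);
        }
        return recovered;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder URI; the real run used a MiniDFSCluster-specific port.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        if (fs instanceof DistributedFileSystem) {
          Path wal = new Path("/user/jenkins/WALs/example.wal");
          System.out.println("closed=" + recoverAndWait((DistributedFileSystem) fs, wal, 60_000));
        }
      }
    }
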
2024-12-08T07:59:09,800 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:38765, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,800 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,800 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T07:59:09,802 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.064sec 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38765,1733644748484-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T07:59:09,804 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38765,1733644748484-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T07:59:09,807 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T07:59:09,807 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T07:59:09,807 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38765,1733644748484-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
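
At this point the master reports initialization complete and its periodic chores are scheduled; the entries that follow show the test client connecting, confirming the mini-cluster is up and issuing balanceSwitch=false. A hedged client-side sketch of those same checks, assuming a reachable cluster configured via hbase-site.xml (none of this code is part of the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
          // Client-side equivalent of the "set balanceSwitch=false" request
          // logged by MasterRpcServices below.
          boolean previouslyOn = admin.balancerSwitch(false, true);
          System.out.println("balancer was previously " + (previouslyOn ? "on" : "off"));
        }
      }
    }
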
2024-12-08T07:59:09,899 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7117bc03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:59:09,899 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,38765,-1 for getting cluster id 2024-12-08T07:59:09,899 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T07:59:09,901 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd8f74ea2-8a98-4e0d-a1af-b9ef2fef4517' 2024-12-08T07:59:09,902 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T07:59:09,902 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d8f74ea2-8a98-4e0d-a1af-b9ef2fef4517" 2024-12-08T07:59:09,902 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5372ab11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:59:09,902 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,38765,-1] 2024-12-08T07:59:09,902 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T07:59:09,903 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T07:59:09,904 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T07:59:09,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b2c89d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T07:59:09,906 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T07:59:09,907 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,37287,1733644748675, seqNum=-1] 2024-12-08T07:59:09,908 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T07:59:09,909 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T07:59:09,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0106a245d0e8,38765,1733644748484 2024-12-08T07:59:09,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T07:59:09,915 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T07:59:09,915 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T07:59:09,916 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 0106a245d0e8,38765,1733644748484 2024-12-08T07:59:09,916 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@697b5021 2024-12-08T07:59:09,916 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T07:59:09,918 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T07:59:09,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T07:59:09,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-08T07:59:09,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T07:59:09,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:09,922 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T07:59:09,922 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:09,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-08T07:59:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:59:09,923 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T07:59:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741835_1011 (size=405) 2024-12-08T07:59:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741835_1011 (size=405) 2024-12-08T07:59:09,931 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => da76fd30cf8a9b7d5699f15f332f2a37, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163 2024-12-08T07:59:09,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741836_1012 (size=88) 2024-12-08T07:59:09,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741836_1012 (size=88) 2024-12-08T07:59:09,938 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:59:09,938 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing da76fd30cf8a9b7d5699f15f332f2a37, disabling compactions & flushes 2024-12-08T07:59:09,938 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T07:59:09,938 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T07:59:09,938 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. after waiting 0 ms 2024-12-08T07:59:09,938 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 
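
The create request logged above asks for table TestLogRolling-testCompactionRecordDoesntBlockRolling with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW'), and TableDescriptorChecker warns that the test's deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are far below sensible production values. A hedged sketch of the equivalent client-side call, reusing those same illustrative sizes; in normal use both would be left at their defaults:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)                      // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)      // BLOOMFILTER => 'ROW'
                .build())
            .setMaxFileSize(786432)        // matches the logged MAX_FILESIZE warning
            .setMemStoreFlushSize(8192)    // matches the logged MEMSTORE_FLUSHSIZE warning
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // On the server side this becomes a CreateTableProcedure (pid=4 in this log).
          admin.createTable(td);
        }
      }
    }
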
2024-12-08T07:59:09,938 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T07:59:09,938 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for da76fd30cf8a9b7d5699f15f332f2a37: Waiting for close lock at 1733644749938Disabling compacts and flushes for region at 1733644749938Disabling writes for close at 1733644749938Writing region close event to WAL at 1733644749938Closed at 1733644749938 2024-12-08T07:59:09,939 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T07:59:09,940 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733644749940"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644749940"}]},"ts":"1733644749940"} 2024-12-08T07:59:09,942 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T07:59:09,943 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T07:59:09,943 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644749943"}]},"ts":"1733644749943"} 2024-12-08T07:59:09,946 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-08T07:59:09,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=da76fd30cf8a9b7d5699f15f332f2a37, ASSIGN}] 2024-12-08T07:59:09,947 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=da76fd30cf8a9b7d5699f15f332f2a37, ASSIGN 2024-12-08T07:59:09,948 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=da76fd30cf8a9b7d5699f15f332f2a37, ASSIGN; state=OFFLINE, location=0106a245d0e8,37287,1733644748675; forceNewPlan=false, retain=false 2024-12-08T07:59:10,099 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da76fd30cf8a9b7d5699f15f332f2a37, regionState=OPENING, regionLocation=0106a245d0e8,37287,1733644748675 2024-12-08T07:59:10,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=da76fd30cf8a9b7d5699f15f332f2a37, ASSIGN because future has completed 2024-12-08T07:59:10,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da76fd30cf8a9b7d5699f15f332f2a37, server=0106a245d0e8,37287,1733644748675}] 2024-12-08T07:59:10,262 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T07:59:10,262 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => da76fd30cf8a9b7d5699f15f332f2a37, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.', STARTKEY => '', ENDKEY => ''} 2024-12-08T07:59:10,262 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,262 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T07:59:10,263 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,263 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,264 INFO [StoreOpener-da76fd30cf8a9b7d5699f15f332f2a37-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,265 INFO [StoreOpener-da76fd30cf8a9b7d5699f15f332f2a37-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da76fd30cf8a9b7d5699f15f332f2a37 columnFamilyName info 2024-12-08T07:59:10,265 DEBUG [StoreOpener-da76fd30cf8a9b7d5699f15f332f2a37-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T07:59:10,266 INFO [StoreOpener-da76fd30cf8a9b7d5699f15f332f2a37-1 {}] regionserver.HStore(327): Store=da76fd30cf8a9b7d5699f15f332f2a37/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T07:59:10,266 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,267 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,267 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,268 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,268 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,269 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,271 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T07:59:10,272 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened da76fd30cf8a9b7d5699f15f332f2a37; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698066, jitterRate=-0.11236430704593658}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T07:59:10,272 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T07:59:10,273 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for da76fd30cf8a9b7d5699f15f332f2a37: Running coprocessor pre-open hook at 1733644750263Writing region info on filesystem at 1733644750263Initializing all the Stores at 1733644750264 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644750264Cleaning up temporary data from old regions at 1733644750268 (+4 ms)Running coprocessor post-open hooks at 1733644750272 (+4 ms)Region opened successfully at 1733644750273 (+1 ms) 2024-12-08T07:59:10,274 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37., pid=6, masterSystemTime=1733644750258 2024-12-08T07:59:10,276 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T07:59:10,277 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T07:59:10,278 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da76fd30cf8a9b7d5699f15f332f2a37, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,37287,1733644748675 2024-12-08T07:59:10,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da76fd30cf8a9b7d5699f15f332f2a37, server=0106a245d0e8,37287,1733644748675 because future has completed 2024-12-08T07:59:10,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T07:59:10,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure da76fd30cf8a9b7d5699f15f332f2a37, server=0106a245d0e8,37287,1733644748675 in 177 msec 2024-12-08T07:59:10,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T07:59:10,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=da76fd30cf8a9b7d5699f15f332f2a37, ASSIGN in 338 msec 2024-12-08T07:59:10,288 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T07:59:10,288 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644750288"}]},"ts":"1733644750288"} 2024-12-08T07:59:10,290 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-08T07:59:10,291 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T07:59:10,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 372 msec 2024-12-08T07:59:10,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:10,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:11,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:11,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:12,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:12,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:13,671 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T07:59:13,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:13,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T07:59:13,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:14,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:14,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:15,197 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T07:59:15,197 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-08T07:59:15,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:15,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:16,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:16,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:17,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:17,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:18,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T07:59:18,168 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T07:59:18,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T07:59:18,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T07:59:18,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T07:59:18,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T07:59:18,170 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:18,170 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T07:59:18,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:18,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:19,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:19,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:19,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T07:59:19,981 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T07:59:19,981 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-08T07:59:19,984 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:19,984 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 
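For context on the test activity that follows: the next entries show the client resolving the region that hosts row 'row0001' of TestLogRolling-testCompactionRecordDoesntBlockRolling, and the master then handling a flush of that table. A hedged, minimal sketch of the kind of client-side put that drives such a location lookup is below; the connection setup, column qualifier, and value are placeholders and are not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRow0001Sketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
      // Before the RPC is sent, the async locator resolves which region server
      // hosts 'row0001' -- the "fetched location ... row='row0001'" entry below.
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value")); // placeholder qualifier/value
      table.put(put);
    }
  }
}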
2024-12-08T07:59:19,987 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37., hostname=0106a245d0e8,37287,1733644748675, seqNum=2] 2024-12-08T07:59:19,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:20,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:20,025 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T07:59:20,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T07:59:20,026 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T07:59:20,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T07:59:20,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37287 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-08T07:59:20,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 
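The "Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling" request above, together with the FlushTableProcedure (pid=7) and its FlushRegionProcedure child (pid=8), corresponds to an admin-initiated table flush. A minimal sketch using the public Admin API, assuming a standard client Connection, would look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master schedules
      // a FlushTableProcedure whose per-region subprocedures run on the region
      // servers, matching the pid=7/pid=8 entries in this log.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}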
2024-12-08T07:59:20,192 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing da76fd30cf8a9b7d5699f15f332f2a37 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T07:59:20,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/1e60203e521944d3898a3c20380baecf is 1080, key is row0001/info:/1733644759988/Put/seqid=0 2024-12-08T07:59:20,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741837_1013 (size=6033) 2024-12-08T07:59:20,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741837_1013 (size=6033) 2024-12-08T07:59:20,218 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/1e60203e521944d3898a3c20380baecf 2024-12-08T07:59:20,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/1e60203e521944d3898a3c20380baecf as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/1e60203e521944d3898a3c20380baecf 2024-12-08T07:59:20,230 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/1e60203e521944d3898a3c20380baecf, entries=1, sequenceid=5, filesize=5.9 K 2024-12-08T07:59:20,231 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for da76fd30cf8a9b7d5699f15f332f2a37 in 39ms, sequenceid=5, compaction requested=false 2024-12-08T07:59:20,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for da76fd30cf8a9b7d5699f15f332f2a37: 2024-12-08T07:59:20,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 
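The "Failed invocation" WARNs that resume below, and that fill most of this section, come from RecoverLeaseFSUtils reflectively invoking DistributedFileSystem.isFileClosed after the underlying DFSClient has already been closed, so each probe surfaces as an InvocationTargetException caused by "Filesystem closed" and the Close-WAL-Writer worker keeps retrying roughly once a second. A rough sketch of that reflective probe pattern (not the actual HBase implementation) is:

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbeSketch {
  // Returns true only when isFileClosed is available and reports the file closed;
  // any reflective failure (for example the wrapped "Filesystem closed" IOException
  // seen in the traces above) is treated as "not yet closed" so the caller retries.
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }
}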
2024-12-08T07:59:20,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T07:59:20,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T07:59:20,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T07:59:20,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-08T07:59:20,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 243 msec 2024-12-08T07:59:20,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:20,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:21,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:21,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:22,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:22,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:23,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:23,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:24,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:24,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:25,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:25,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:26,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:26,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:27,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:27,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:28,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:28,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:29,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:29,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 after 68057ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T07:59:29,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:29,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta after 68041ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
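The repeated WARN entries above come from the Close-WAL-Writer thread polling HDFS lease recovery after the test has already shut the DFSClient down, so each recoverLease/isFileClosed call fails with "Filesystem closed". A minimal Java sketch of that polling pattern (not HBase's actual RecoverLeaseFSUtils; the WAL path, retry bound, and sleep interval are illustrative assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Poll lease recovery on an HDFS file, roughly what the retry loop in the log is doing.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean closed = dfs.recoverLease(wal);   // true when the file is already closed
    for (int attempt = 0; !closed && attempt < 60; attempt++) {
      Thread.sleep(1000L);                    // retry interval, illustrative only
      // Once the DFSClient has been closed, this throws IOException("Filesystem closed"),
      // which is the Caused-by seen in the WARN entries above.
      closed = dfs.isFileClosed(wal);
    }
    return closed;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      if (fs instanceof DistributedFileSystem) {
        // hypothetical WAL path for illustration
        recoverLease((DistributedFileSystem) fs, new Path("/hbase/WALs/example.wal"));
      }
    }
  }
}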
2024-12-08T07:59:30,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T07:59:30,132 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T07:59:30,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:30,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T07:59:30,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-08T07:59:30,139 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T07:59:30,140 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T07:59:30,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T07:59:30,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37287 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-08T07:59:30,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 
2024-12-08T07:59:30,295 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing da76fd30cf8a9b7d5699f15f332f2a37 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T07:59:30,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d2cba1d900b04989b81e56a08f7b0a16 is 1080, key is row0002/info:/1733644770133/Put/seqid=0 2024-12-08T07:59:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741838_1014 (size=6033) 2024-12-08T07:59:30,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741838_1014 (size=6033) 2024-12-08T07:59:30,306 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d2cba1d900b04989b81e56a08f7b0a16 2024-12-08T07:59:30,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d2cba1d900b04989b81e56a08f7b0a16 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d2cba1d900b04989b81e56a08f7b0a16 2024-12-08T07:59:30,318 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d2cba1d900b04989b81e56a08f7b0a16, entries=1, sequenceid=9, filesize=5.9 K 2024-12-08T07:59:30,319 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for da76fd30cf8a9b7d5699f15f332f2a37 in 24ms, sequenceid=9, compaction requested=false 2024-12-08T07:59:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for da76fd30cf8a9b7d5699f15f332f2a37: 2024-12-08T07:59:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 
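The pid=9/pid=10 entries around this point record a client-requested flush of TestLogRolling-testCompactionRecordDoesntBlockRolling being executed as a FlushTableProcedure with a per-region FlushRegionProcedure. A hedged sketch of issuing such a flush through the standard HBase Admin API (connection settings are illustrative; this is not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml pointing at the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master runs this as FlushTableProcedure -> FlushRegionProcedure,
      // matching the pid=9 / pid=10 entries in the log.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}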
2024-12-08T07:59:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-08T07:59:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-08T07:59:30,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-08T07:59:30,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-12-08T07:59:30,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-12-08T07:59:30,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:30,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:31,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:31,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:32,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:32,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:33,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:33,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:34,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:34,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:35,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:35,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:36,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:36,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:37,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:37,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-08T07:59:38,468 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T07:59:38,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-08T07:59:38,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:39,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:39,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-08T07:59:40,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-08T07:59:40,222 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-08T07:59:40,226 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37287%2C1733644748675.1733644780225
2024-12-08T07:59:40,232 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:40,233 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:40,233 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:40,233 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:40,233 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:40,233 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644749337 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644780225
2024-12-08T07:59:40,234 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33665:33665),(127.0.0.1/127.0.0.1:38711:38711)]
2024-12-08T07:59:40,234 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644749337 is not closed yet, will try archiving it next time
2024-12-08T07:59:40,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T07:59:40,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741833_1009 (size=5546)
2024-12-08T07:59:40,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741833_1009 (size=5546)
2024-12-08T07:59:40,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T07:59:40,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-08T07:59:40,238 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-08T07:59:40,239 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T07:59:40,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T07:59:40,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37287 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-08T07:59:40,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T07:59:40,393 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing da76fd30cf8a9b7d5699f15f332f2a37 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-08T07:59:40,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d63f659087994db1ab8e612a959a5d48 is 1080, key is row0003/info:/1733644780224/Put/seqid=0
2024-12-08T07:59:40,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741840_1016 (size=6033)
2024-12-08T07:59:40,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741840_1016 (size=6033)
2024-12-08T07:59:40,403 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d63f659087994db1ab8e612a959a5d48
2024-12-08T07:59:40,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d63f659087994db1ab8e612a959a5d48 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d63f659087994db1ab8e612a959a5d48
2024-12-08T07:59:40,416 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d63f659087994db1ab8e612a959a5d48, entries=1, sequenceid=13, filesize=5.9 K
2024-12-08T07:59:40,417 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for da76fd30cf8a9b7d5699f15f332f2a37 in 24ms, sequenceid=13, compaction requested=true
2024-12-08T07:59:40,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for da76fd30cf8a9b7d5699f15f332f2a37:
2024-12-08T07:59:40,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T07:59:40,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-08T07:59:40,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-08T07:59:40,421 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-08T07:59:40,421 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec
2024-12-08T07:59:40,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec
2024-12-08T07:59:40,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:40,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:41,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:41,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:42,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:42,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:43,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:43,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:44,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:44,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:45,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:45,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:46,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:46,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:47,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:47,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:48,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:48,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:49,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:49,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-08T07:59:50,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-08T07:59:50,331 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-08T07:59:50,331 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-08T07:59:50,332 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-08T07:59:50,332 DEBUG [Time-limited test {}] regionserver.HStore(1541): da76fd30cf8a9b7d5699f15f332f2a37/info is initiating minor compaction (all files)
2024-12-08T07:59:50,332 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T07:59:50,332 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T07:59:50,333 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of da76fd30cf8a9b7d5699f15f332f2a37/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T07:59:50,333 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/1e60203e521944d3898a3c20380baecf, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d2cba1d900b04989b81e56a08f7b0a16, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d63f659087994db1ab8e612a959a5d48] into tmpdir=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp, totalSize=17.7 K
2024-12-08T07:59:50,333 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1e60203e521944d3898a3c20380baecf, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733644759988
2024-12-08T07:59:50,334 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d2cba1d900b04989b81e56a08f7b0a16, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733644770133
2024-12-08T07:59:50,334 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d63f659087994db1ab8e612a959a5d48, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733644780224
2024-12-08T07:59:50,351 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): da76fd30cf8a9b7d5699f15f332f2a37#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T07:59:50,352 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/b986876c92a143b997ed34e0da940f3c is 1080, key is row0001/info:/1733644759988/Put/seqid=0
2024-12-08T07:59:50,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741841_1017 (size=8296)
2024-12-08T07:59:50,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741841_1017 (size=8296)
2024-12-08T07:59:50,365 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/b986876c92a143b997ed34e0da940f3c as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/b986876c92a143b997ed34e0da940f3c
2024-12-08T07:59:50,372 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in da76fd30cf8a9b7d5699f15f332f2a37/info of da76fd30cf8a9b7d5699f15f332f2a37 into b986876c92a143b997ed34e0da940f3c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T07:59:50,372 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for da76fd30cf8a9b7d5699f15f332f2a37:
2024-12-08T07:59:50,375 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37287%2C1733644748675.1733644790375
2024-12-08T07:59:50,387 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:50,387 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:50,387 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:50,387 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:50,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-08T07:59:50,388 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644780225 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644790375
2024-12-08T07:59:50,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741839_1015 (size=2520)
2024-12-08T07:59:50,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741839_1015 (size=2520)
2024-12-08T07:59:50,397 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38711:38711),(127.0.0.1/127.0.0.1:33665:33665)]
2024-12-08T07:59:50,397 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644749337 to hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/oldWALs/0106a245d0e8%2C37287%2C1733644748675.1733644749337
2024-12-08T07:59:50,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T07:59:50,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T07:59:50,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-08T07:59:50,400 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-08T07:59:50,401 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T07:59:50,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T07:59:50,451 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-08T07:59:50,451 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-08T07:59:50,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37287 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-08T07:59:50,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T07:59:50,554 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing da76fd30cf8a9b7d5699f15f332f2a37 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-08T07:59:50,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/3903db976d82427bad016c1cceb0b0f1 is 1080, key is row0000/info:/1733644790373/Put/seqid=0
2024-12-08T07:59:50,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741843_1019 (size=6033)
2024-12-08T07:59:50,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741843_1019 (size=6033)
2024-12-08T07:59:50,566 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/3903db976d82427bad016c1cceb0b0f1
2024-12-08T07:59:50,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/3903db976d82427bad016c1cceb0b0f1 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/3903db976d82427bad016c1cceb0b0f1
2024-12-08T07:59:50,581 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/3903db976d82427bad016c1cceb0b0f1, entries=1, sequenceid=18, filesize=5.9 K
2024-12-08T07:59:50,582 INFO [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for da76fd30cf8a9b7d5699f15f332f2a37 in 28ms, sequenceid=18, compaction requested=false
2024-12-08T07:59:50,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for da76fd30cf8a9b7d5699f15f332f2a37:
2024-12-08T07:59:50,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T07:59:50,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-08T07:59:50,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-08T07:59:50,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-08T07:59:50,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec
2024-12-08T07:59:50,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec
2024-12-08T07:59:50,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
...
11 more 2024-12-08T07:59:50,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:51,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:51,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:52,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:52,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:53,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:53,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:54,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:54,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T07:59:55,263 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region da76fd30cf8a9b7d5699f15f332f2a37, had cached 0 bytes from a total of 14329 2024-12-08T07:59:55,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:55,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:56,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:56,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:57,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:57,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:58,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:58,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:59,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T07:59:59,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38765 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-08T08:00:00,471 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T08:00:00,474 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C37287%2C1733644748675.1733644800474 2024-12-08T08:00:00,481 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:00,481 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:00,481 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:00,481 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:00,481 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:00,481 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644790375 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644800474 2024-12-08T08:00:00,482 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38711:38711),(127.0.0.1/127.0.0.1:33665:33665)] 2024-12-08T08:00:00,482 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644790375 is not closed yet, will try archiving it next time 2024-12-08T08:00:00,482 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/WALs/0106a245d0e8,37287,1733644748675/0106a245d0e8%2C37287%2C1733644748675.1733644780225 to hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/oldWALs/0106a245d0e8%2C37287%2C1733644748675.1733644780225 2024-12-08T08:00:00,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T08:00:00,483 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T08:00:00,483 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T08:00:00,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:00:00,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:00:00,483 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T08:00:00,483 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-08T08:00:00,483 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1274133240, stopped=false 2024-12-08T08:00:00,483 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,38765,1733644748484 2024-12-08T08:00:00,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741842_1018 (size=2026) 2024-12-08T08:00:00,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741842_1018 (size=2026) 2024-12-08T08:00:00,553 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T08:00:00,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T08:00:00,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T08:00:00,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:00,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:00,553 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T08:00:00,553 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T08:00:00,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:00:00,553 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,37287,1733644748675' ***** 2024-12-08T08:00:00,553 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T08:00:00,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:00:00,554 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T08:00:00,554 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(3091): Received CLOSE for da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,37287,1733644748675 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T08:00:00,554 INFO [RS:0;0106a245d0e8:37287 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:37287. 2024-12-08T08:00:00,554 DEBUG [RS:0;0106a245d0e8:37287 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T08:00:00,554 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing da76fd30cf8a9b7d5699f15f332f2a37, disabling compactions & flushes 2024-12-08T08:00:00,554 DEBUG [RS:0;0106a245d0e8:37287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:00:00,554 INFO 
[RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T08:00:00,555 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. after waiting 0 ms 2024-12-08T08:00:00,555 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. 2024-12-08T08:00:00,555 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T08:00:00,555 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T08:00:00,555 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing da76fd30cf8a9b7d5699f15f332f2a37 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T08:00:00,555 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T08:00:00,555 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, da76fd30cf8a9b7d5699f15f332f2a37=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.} 2024-12-08T08:00:00,555 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, da76fd30cf8a9b7d5699f15f332f2a37 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T08:00:00,555 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T08:00:00,555 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T08:00:00,555 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-08T08:00:00,559 DEBUG 
[RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d49c4820fecb4cb4b20c36c53a8dd355 is 1080, key is row0001/info:/1733644800473/Put/seqid=0 2024-12-08T08:00:00,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741845_1021 (size=6033) 2024-12-08T08:00:00,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741845_1021 (size=6033) 2024-12-08T08:00:00,564 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d49c4820fecb4cb4b20c36c53a8dd355 2024-12-08T08:00:00,571 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/.tmp/info/d49c4820fecb4cb4b20c36c53a8dd355 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d49c4820fecb4cb4b20c36c53a8dd355 2024-12-08T08:00:00,573 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/info/4e9c11c1072742cca83b8218606fa119 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37./info:regioninfo/1733644750277/Put/seqid=0 2024-12-08T08:00:00,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741846_1022 (size=7308) 2024-12-08T08:00:00,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741846_1022 (size=7308) 2024-12-08T08:00:00,578 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d49c4820fecb4cb4b20c36c53a8dd355, entries=1, sequenceid=22, filesize=5.9 K 2024-12-08T08:00:00,578 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/info/4e9c11c1072742cca83b8218606fa119 2024-12-08T08:00:00,579 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for da76fd30cf8a9b7d5699f15f332f2a37 in 24ms, sequenceid=22, compaction requested=true 2024-12-08T08:00:00,579 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/1e60203e521944d3898a3c20380baecf, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d2cba1d900b04989b81e56a08f7b0a16, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d63f659087994db1ab8e612a959a5d48] to archive 2024-12-08T08:00:00,580 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T08:00:00,582 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/1e60203e521944d3898a3c20380baecf to hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/1e60203e521944d3898a3c20380baecf 2024-12-08T08:00:00,584 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d2cba1d900b04989b81e56a08f7b0a16 to hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d2cba1d900b04989b81e56a08f7b0a16 2024-12-08T08:00:00,585 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d63f659087994db1ab8e612a959a5d48 to hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/info/d63f659087994db1ab8e612a959a5d48 2024-12-08T08:00:00,586 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0106a245d0e8:38765 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 16 more
2024-12-08T08:00:00,586 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1e60203e521944d3898a3c20380baecf=6033, d2cba1d900b04989b81e56a08f7b0a16=6033, d63f659087994db1ab8e612a959a5d48=6033]
2024-12-08T08:00:00,591 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/da76fd30cf8a9b7d5699f15f332f2a37/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-12-08T08:00:00,592 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T08:00:00,592 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for da76fd30cf8a9b7d5699f15f332f2a37: Waiting for close lock at 1733644800554Running coprocessor pre-close hooks at 1733644800554Disabling compacts and flushes for region at 1733644800554Disabling writes for close at 1733644800555 (+1 ms)Obtaining lock to block concurrent updates at 1733644800555Preparing flush snapshotting stores in da76fd30cf8a9b7d5699f15f332f2a37 at 1733644800555Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733644800555Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37. at 1733644800556 (+1 ms)Flushing da76fd30cf8a9b7d5699f15f332f2a37/info: creating writer at 1733644800557 (+1 ms)Flushing da76fd30cf8a9b7d5699f15f332f2a37/info: appending metadata at 1733644800559 (+2 ms)Flushing da76fd30cf8a9b7d5699f15f332f2a37/info: closing flushed file at 1733644800559Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5152412f: reopening flushed file at 1733644800570 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for da76fd30cf8a9b7d5699f15f332f2a37 in 24ms, sequenceid=22, compaction requested=true at 1733644800579 (+9 ms)Writing region close event to WAL at 1733644800587 (+8 ms)Running coprocessor post-close hooks at 1733644800592 (+5 ms)Closed at 1733644800592
2024-12-08T08:00:00,592 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733644749918.da76fd30cf8a9b7d5699f15f332f2a37.
2024-12-08T08:00:00,598 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/ns/183f35a084fe4fcfb3fdf5784662271d is 43, key is default/ns:d/1733644749791/Put/seqid=0
2024-12-08T08:00:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741847_1023 (size=5153)
2024-12-08T08:00:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741847_1023 (size=5153)
2024-12-08T08:00:00,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:00:00,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:00:00,755 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-08T08:00:00,955 DEBUG [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-08T08:00:01,005 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/ns/183f35a084fe4fcfb3fdf5784662271d
2024-12-08T08:00:01,027 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/table/d7500ea77e1a4129973fe9210769ccc7 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733644750288/Put/seqid=0
2024-12-08T08:00:01,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741848_1024 (size=5508)
2024-12-08T08:00:01,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741848_1024 (size=5508)
2024-12-08T08:00:01,031 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/table/d7500ea77e1a4129973fe9210769ccc7
2024-12-08T08:00:01,036 DEBUG
[RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/info/4e9c11c1072742cca83b8218606fa119 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/info/4e9c11c1072742cca83b8218606fa119 2024-12-08T08:00:01,041 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/info/4e9c11c1072742cca83b8218606fa119, entries=10, sequenceid=11, filesize=7.1 K 2024-12-08T08:00:01,042 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/ns/183f35a084fe4fcfb3fdf5784662271d as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/ns/183f35a084fe4fcfb3fdf5784662271d 2024-12-08T08:00:01,049 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/ns/183f35a084fe4fcfb3fdf5784662271d, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T08:00:01,049 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/.tmp/table/d7500ea77e1a4129973fe9210769ccc7 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/table/d7500ea77e1a4129973fe9210769ccc7 2024-12-08T08:00:01,056 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/table/d7500ea77e1a4129973fe9210769ccc7, entries=2, sequenceid=11, filesize=5.4 K 2024-12-08T08:00:01,057 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 502ms, sequenceid=11, compaction requested=false 2024-12-08T08:00:01,061 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T08:00:01,062 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T08:00:01,062 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T08:00:01,062 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644800555Running 
coprocessor pre-close hooks at 1733644800555Disabling compacts and flushes for region at 1733644800555Disabling writes for close at 1733644800555Obtaining lock to block concurrent updates at 1733644800555Preparing flush snapshotting stores in 1588230740 at 1733644800555Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733644800555Flushing stores of hbase:meta,,1.1588230740 at 1733644800556 (+1 ms)Flushing 1588230740/info: creating writer at 1733644800556Flushing 1588230740/info: appending metadata at 1733644800572 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733644800572Flushing 1588230740/ns: creating writer at 1733644800584 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733644800598 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733644800598Flushing 1588230740/table: creating writer at 1733644801012 (+414 ms)Flushing 1588230740/table: appending metadata at 1733644801026 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733644801026Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f35431a: reopening flushed file at 1733644801036 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e575934: reopening flushed file at 1733644801042 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@636eba95: reopening flushed file at 1733644801049 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 502ms, sequenceid=11, compaction requested=false at 1733644801057 (+8 ms)Writing region close event to WAL at 1733644801058 (+1 ms)Running coprocessor post-close hooks at 1733644801062 (+4 ms)Closed at 1733644801062 2024-12-08T08:00:01,062 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T08:00:01,155 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,37287,1733644748675; all regions closed. 
2024-12-08T08:00:01,156 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,156 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,156 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,156 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,157 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741834_1010 (size=3306) 2024-12-08T08:00:01,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741834_1010 (size=3306) 2024-12-08T08:00:01,163 DEBUG [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/oldWALs 2024-12-08T08:00:01,163 INFO [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C37287%2C1733644748675.meta:.meta(num 1733644749702) 2024-12-08T08:00:01,163 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,163 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,163 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,163 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741844_1020 (size=1252) 2024-12-08T08:00:01,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741844_1020 (size=1252) 2024-12-08T08:00:01,168 DEBUG [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/oldWALs 2024-12-08T08:00:01,168 INFO [RS:0;0106a245d0e8:37287 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C37287%2C1733644748675:(num 1733644800474) 2024-12-08T08:00:01,168 DEBUG [RS:0;0106a245d0e8:37287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:00:01,168 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T08:00:01,169 INFO [RS:0;0106a245d0e8:37287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T08:00:01,169 INFO [RS:0;0106a245d0e8:37287 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T08:00:01,169 INFO [RS:0;0106a245d0e8:37287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T08:00:01,169 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T08:00:01,169 INFO [RS:0;0106a245d0e8:37287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37287 2024-12-08T08:00:01,204 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T08:00:01,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,37287,1733644748675 2024-12-08T08:00:01,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T08:00:01,216 INFO [RS:0;0106a245d0e8:37287 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T08:00:01,226 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,37287,1733644748675] 2024-12-08T08:00:01,237 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,37287,1733644748675 already deleted, retry=false 2024-12-08T08:00:01,237 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,37287,1733644748675 expired; onlineServers=0 2024-12-08T08:00:01,237 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,38765,1733644748484' ***** 2024-12-08T08:00:01,237 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T08:00:01,237 INFO [M:0;0106a245d0e8:38765 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T08:00:01,237 INFO [M:0;0106a245d0e8:38765 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T08:00:01,237 DEBUG [M:0;0106a245d0e8:38765 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T08:00:01,237 DEBUG [M:0;0106a245d0e8:38765 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T08:00:01,237 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T08:00:01,237 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644749016 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644749016,5,FailOnTimeoutGroup] 2024-12-08T08:00:01,237 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644749016 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644749016,5,FailOnTimeoutGroup] 2024-12-08T08:00:01,238 INFO [M:0;0106a245d0e8:38765 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T08:00:01,238 INFO [M:0;0106a245d0e8:38765 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T08:00:01,238 DEBUG [M:0;0106a245d0e8:38765 {}] master.HMaster(1795): Stopping service threads 2024-12-08T08:00:01,238 INFO [M:0;0106a245d0e8:38765 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T08:00:01,238 INFO [M:0;0106a245d0e8:38765 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T08:00:01,238 INFO [M:0;0106a245d0e8:38765 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T08:00:01,238 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T08:00:01,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:00:01,326 INFO [RS:0;0106a245d0e8:37287 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T08:00:01,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37287-0x100046f8d7d0001, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:00:01,327 INFO [RS:0;0106a245d0e8:37287 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,37287,1733644748675; zookeeper connection closed. 
2024-12-08T08:00:01,327 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@315d23ec {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@315d23ec 2024-12-08T08:00:01,327 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T08:00:01,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T08:00:01,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:01,334 DEBUG [M:0;0106a245d0e8:38765 {}] zookeeper.ZKUtil(347): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T08:00:01,334 WARN [M:0;0106a245d0e8:38765 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T08:00:01,335 INFO [M:0;0106a245d0e8:38765 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/.lastflushedseqids 2024-12-08T08:00:01,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741849_1025 (size=130) 2024-12-08T08:00:01,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741849_1025 (size=130) 2024-12-08T08:00:01,341 INFO [M:0;0106a245d0e8:38765 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T08:00:01,341 INFO [M:0;0106a245d0e8:38765 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T08:00:01,341 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T08:00:01,341 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:00:01,341 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:00:01,341 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T08:00:01,341 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T08:00:01,341 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-12-08T08:00:01,357 DEBUG [M:0;0106a245d0e8:38765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e8f0bfc4a26414fa79b00c29cb53873 is 82, key is hbase:meta,,1/info:regioninfo/1733644749730/Put/seqid=0 2024-12-08T08:00:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741850_1026 (size=5672) 2024-12-08T08:00:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741850_1026 (size=5672) 2024-12-08T08:00:01,363 INFO [M:0;0106a245d0e8:38765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e8f0bfc4a26414fa79b00c29cb53873 2024-12-08T08:00:01,382 DEBUG [M:0;0106a245d0e8:38765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bad04c41505d485f841c88645105910b is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733644750292/Put/seqid=0 2024-12-08T08:00:01,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741851_1027 (size=7824) 2024-12-08T08:00:01,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741851_1027 (size=7824) 2024-12-08T08:00:01,387 INFO [M:0;0106a245d0e8:38765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bad04c41505d485f841c88645105910b 2024-12-08T08:00:01,392 INFO [M:0;0106a245d0e8:38765 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bad04c41505d485f841c88645105910b 2024-12-08T08:00:01,407 DEBUG [M:0;0106a245d0e8:38765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08cd00039743415eb5b36e34908a9e90 is 69, key is 0106a245d0e8,37287,1733644748675/rs:state/1733644749180/Put/seqid=0 2024-12-08T08:00:01,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741852_1028 (size=5156) 2024-12-08T08:00:01,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741852_1028 (size=5156) 2024-12-08T08:00:01,412 INFO [M:0;0106a245d0e8:38765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), 
to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08cd00039743415eb5b36e34908a9e90 2024-12-08T08:00:01,431 DEBUG [M:0;0106a245d0e8:38765 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98593b6636f8419db063fc3a1a438e7c is 52, key is load_balancer_on/state:d/1733644749914/Put/seqid=0 2024-12-08T08:00:01,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741853_1029 (size=5056) 2024-12-08T08:00:01,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741853_1029 (size=5056) 2024-12-08T08:00:01,436 INFO [M:0;0106a245d0e8:38765 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98593b6636f8419db063fc3a1a438e7c 2024-12-08T08:00:01,441 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7e8f0bfc4a26414fa79b00c29cb53873 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7e8f0bfc4a26414fa79b00c29cb53873 2024-12-08T08:00:01,446 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7e8f0bfc4a26414fa79b00c29cb53873, entries=8, sequenceid=121, filesize=5.5 K 2024-12-08T08:00:01,447 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bad04c41505d485f841c88645105910b as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bad04c41505d485f841c88645105910b 2024-12-08T08:00:01,451 INFO [M:0;0106a245d0e8:38765 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bad04c41505d485f841c88645105910b 2024-12-08T08:00:01,451 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bad04c41505d485f841c88645105910b, entries=14, sequenceid=121, filesize=7.6 K 2024-12-08T08:00:01,452 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08cd00039743415eb5b36e34908a9e90 as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08cd00039743415eb5b36e34908a9e90 
2024-12-08T08:00:01,457 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08cd00039743415eb5b36e34908a9e90, entries=1, sequenceid=121, filesize=5.0 K 2024-12-08T08:00:01,458 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98593b6636f8419db063fc3a1a438e7c as hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/98593b6636f8419db063fc3a1a438e7c 2024-12-08T08:00:01,464 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35287/user/jenkins/test-data/32ba1d6b-19a6-723f-daf7-2be1ddfda163/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/98593b6636f8419db063fc3a1a438e7c, entries=1, sequenceid=121, filesize=4.9 K 2024-12-08T08:00:01,465 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false 2024-12-08T08:00:01,466 INFO [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:00:01,466 DEBUG [M:0;0106a245d0e8:38765 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644801341Disabling compacts and flushes for region at 1733644801341Disabling writes for close at 1733644801341Obtaining lock to block concurrent updates at 1733644801341Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644801341Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1733644801341Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733644801342 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644801342Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644801357 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644801357Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644801368 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644801382 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644801382Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644801392 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644801407 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644801407Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644801416 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644801430 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644801430Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c09f28a: reopening flushed file at 1733644801440 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2da53d50: reopening flushed file at 1733644801446 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ec5648a: reopening flushed file at 1733644801452 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fb95ad7: reopening flushed file at 1733644801457 (+5 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false at 1733644801465 (+8 ms)Writing region close event to WAL at 1733644801466 (+1 ms)Closed at 1733644801466 2024-12-08T08:00:01,467 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,467 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,467 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,467 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,467 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:00:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37527 is added to blk_1073741830_1006 (size=53047) 2024-12-08T08:00:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42175 is added to blk_1073741830_1006 (size=53047) 2024-12-08T08:00:01,469 INFO [M:0;0106a245d0e8:38765 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T08:00:01,469 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T08:00:01,469 INFO [M:0;0106a245d0e8:38765 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38765 2024-12-08T08:00:01,470 INFO [M:0;0106a245d0e8:38765 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T08:00:01,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:00:01,635 INFO [M:0;0106a245d0e8:38765 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T08:00:01,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38765-0x100046f8d7d0000, quorum=127.0.0.1:63530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:00:01,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b173219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:00:01,671 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c1a9f69{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:00:01,672 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:00:01,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bff2115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:00:01,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26731840{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir/,STOPPED} 2024-12-08T08:00:01,673 WARN [BP-1167299508-172.17.0.2-1733644746014 heartbeating to localhost/127.0.0.1:35287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T08:00:01,673 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T08:00:01,673 WARN [BP-1167299508-172.17.0.2-1733644746014 heartbeating to localhost/127.0.0.1:35287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1167299508-172.17.0.2-1733644746014 (Datanode Uuid ccec0154-d0e2-4e21-9d64-62b551b2ac39) service to localhost/127.0.0.1:35287 2024-12-08T08:00:01,673 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T08:00:01,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data3/current/BP-1167299508-172.17.0.2-1733644746014 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:00:01,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data4/current/BP-1167299508-172.17.0.2-1733644746014 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:00:01,674 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T08:00:01,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5570a111{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:00:01,676 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61fcc471{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:00:01,676 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:00:01,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77f859f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:00:01,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@443c49f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir/,STOPPED} 2024-12-08T08:00:01,678 WARN [BP-1167299508-172.17.0.2-1733644746014 heartbeating to localhost/127.0.0.1:35287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T08:00:01,678 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T08:00:01,678 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T08:00:01,678 WARN [BP-1167299508-172.17.0.2-1733644746014 heartbeating to localhost/127.0.0.1:35287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1167299508-172.17.0.2-1733644746014 (Datanode Uuid 17d897d4-f2d3-49fe-8b99-2ef0e5530908) service to localhost/127.0.0.1:35287 2024-12-08T08:00:01,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data1/current/BP-1167299508-172.17.0.2-1733644746014 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:00:01,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/cluster_b3167e19-aa75-8dc8-fb12-43d0adad6fe8/data/data2/current/BP-1167299508-172.17.0.2-1733644746014 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:00:01,679 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T08:00:01,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@206f042f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T08:00:01,685 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e21aaf2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:00:01,685 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:00:01,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4732430a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:00:01,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e470e04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir/,STOPPED} 2024-12-08T08:00:01,690 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T08:00:01,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T08:00:01,716 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35287 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35287 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35287 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35287 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35287 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35287 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35287 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35287 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35287 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 203) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8098 (was 8267) 2024-12-08T08:00:01,723 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=8098 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.log.dir so I do NOT create it in target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b403fa25-cc4e-482c-b79a-32ed04d75846/hadoop.tmp.dir so I do NOT create it in target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3, deleteOnExit=true 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/test.cache.data in system properties and HBase conf 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir in system properties and HBase conf 2024-12-08T08:00:01,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T08:00:01,725 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T08:00:01,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/nfs.dump.dir in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/java.io.tmpdir in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T08:00:01,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T08:00:01,742 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T08:00:01,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:01,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:02,123 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:00:02,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T08:00:02,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T08:00:02,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T08:00:02,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T08:00:02,129 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:00:02,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac7d52f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir/,AVAILABLE} 2024-12-08T08:00:02,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444d0b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T08:00:02,232 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cfa2328{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/java.io.tmpdir/jetty-localhost-36683-hadoop-hdfs-3_4_1-tests_jar-_-any-10026821980119489078/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T08:00:02,233 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@250c37c4{HTTP/1.1, (http/1.1)}{localhost:36683} 2024-12-08T08:00:02,233 INFO [Time-limited test {}] server.Server(415): Started @249801ms 2024-12-08T08:00:02,244 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T08:00:02,493 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:00:02,496 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T08:00:02,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T08:00:02,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T08:00:02,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T08:00:02,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a6db152{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir/,AVAILABLE} 2024-12-08T08:00:02,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@345bbf4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T08:00:02,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fc20c75{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/java.io.tmpdir/jetty-localhost-36283-hadoop-hdfs-3_4_1-tests_jar-_-any-11030197536240550900/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:00:02,601 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b32401d{HTTP/1.1, (http/1.1)}{localhost:36283} 2024-12-08T08:00:02,601 INFO [Time-limited test {}] server.Server(415): Started @250169ms 2024-12-08T08:00:02,602 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T08:00:02,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:00:02,631 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T08:00:02,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T08:00:02,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T08:00:02,632 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T08:00:02,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@268a31fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir/,AVAILABLE} 2024-12-08T08:00:02,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49bf1df8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T08:00:02,730 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@499df229{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/java.io.tmpdir/jetty-localhost-42129-hadoop-hdfs-3_4_1-tests_jar-_-any-2492181131273072383/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:00:02,730 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3323ea67{HTTP/1.1, (http/1.1)}{localhost:42129} 2024-12-08T08:00:02,730 INFO [Time-limited test {}] server.Server(415): Started @250298ms 2024-12-08T08:00:02,731 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T08:00:02,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:02,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:03,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:03,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:04,026 WARN [Thread-1966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data2/current/BP-840104754-172.17.0.2-1733644801755/current, will proceed with Du for space computation calculation, 2024-12-08T08:00:04,026 WARN [Thread-1965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data1/current/BP-840104754-172.17.0.2-1733644801755/current, will proceed with Du for space computation calculation, 2024-12-08T08:00:04,043 WARN [Thread-1929 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T08:00:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe9b75531ef8278e with lease ID 0xdf60cfad873cdcc2: Processing first storage report for DS-db7cb783-6f32-4284-9cf1-a5bd98135d20 from datanode DatanodeRegistration(127.0.0.1:40687, datanodeUuid=8fda1568-2a57-4476-a731-d62e21810730, infoPort=37403, infoSecurePort=0, ipcPort=42817, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755) 2024-12-08T08:00:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe9b75531ef8278e with lease ID 0xdf60cfad873cdcc2: from storage DS-db7cb783-6f32-4284-9cf1-a5bd98135d20 node DatanodeRegistration(127.0.0.1:40687, datanodeUuid=8fda1568-2a57-4476-a731-d62e21810730, infoPort=37403, infoSecurePort=0, ipcPort=42817, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:00:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfe9b75531ef8278e with lease ID 0xdf60cfad873cdcc2: Processing first storage report for DS-83b7e9b3-049f-4969-8f31-f3e096ad962e from datanode DatanodeRegistration(127.0.0.1:40687, datanodeUuid=8fda1568-2a57-4476-a731-d62e21810730, infoPort=37403, infoSecurePort=0, ipcPort=42817, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755) 2024-12-08T08:00:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfe9b75531ef8278e with lease ID 0xdf60cfad873cdcc2: from storage DS-83b7e9b3-049f-4969-8f31-f3e096ad962e node DatanodeRegistration(127.0.0.1:40687, datanodeUuid=8fda1568-2a57-4476-a731-d62e21810730, infoPort=37403, infoSecurePort=0, ipcPort=42817, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:00:04,229 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data4/current/BP-840104754-172.17.0.2-1733644801755/current, will proceed with Du for space computation calculation, 2024-12-08T08:00:04,229 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data3/current/BP-840104754-172.17.0.2-1733644801755/current, will proceed with Du for space computation calculation, 2024-12-08T08:00:04,246 WARN [Thread-1952 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T08:00:04,248 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd69ab9d61cee6c18 with lease ID 0xdf60cfad873cdcc3: Processing first storage report for DS-bc3236d4-2e7e-47d4-9ff5-72872c6d41ce from datanode DatanodeRegistration(127.0.0.1:45413, datanodeUuid=0dd5dfd3-8847-4a95-8449-e40b31516f4c, infoPort=44551, infoSecurePort=0, ipcPort=40471, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755) 2024-12-08T08:00:04,248 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd69ab9d61cee6c18 with lease ID 0xdf60cfad873cdcc3: from storage DS-bc3236d4-2e7e-47d4-9ff5-72872c6d41ce node DatanodeRegistration(127.0.0.1:45413, datanodeUuid=0dd5dfd3-8847-4a95-8449-e40b31516f4c, infoPort=44551, infoSecurePort=0, ipcPort=40471, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:00:04,248 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd69ab9d61cee6c18 with lease ID 0xdf60cfad873cdcc3: Processing first storage report for DS-5ac54b13-2e7d-444b-9dbc-e6ad9a0c59e5 from datanode DatanodeRegistration(127.0.0.1:45413, datanodeUuid=0dd5dfd3-8847-4a95-8449-e40b31516f4c, infoPort=44551, infoSecurePort=0, ipcPort=40471, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755) 2024-12-08T08:00:04,248 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd69ab9d61cee6c18 with lease ID 0xdf60cfad873cdcc3: from storage DS-5ac54b13-2e7d-444b-9dbc-e6ad9a0c59e5 node DatanodeRegistration(127.0.0.1:45413, datanodeUuid=0dd5dfd3-8847-4a95-8449-e40b31516f4c, infoPort=44551, infoSecurePort=0, ipcPort=40471, storageInfo=lv=-57;cid=testClusterID;nsid=1905282189;c=1733644801755), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:00:04,263 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939 2024-12-08T08:00:04,265 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/zookeeper_0, clientPort=64925, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T08:00:04,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64925 2024-12-08T08:00:04,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,267 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741825_1001 (size=7) 2024-12-08T08:00:04,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741825_1001 (size=7) 2024-12-08T08:00:04,283 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a with version=8 2024-12-08T08:00:04,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging 2024-12-08T08:00:04,286 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T08:00:04,286 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T08:00:04,289 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40401 2024-12-08T08:00:04,290 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40401 connecting to ZooKeeper ensemble=127.0.0.1:64925 2024-12-08T08:00:04,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404010x0, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T08:00:04,353 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40401-0x1000470676f0000 connected 2024-12-08T08:00:04,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,443 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,445 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:00:04,445 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a, hbase.cluster.distributed=false 2024-12-08T08:00:04,446 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T08:00:04,447 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40401 2024-12-08T08:00:04,447 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40401 2024-12-08T08:00:04,447 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40401 2024-12-08T08:00:04,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40401 2024-12-08T08:00:04,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40401 2024-12-08T08:00:04,464 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T08:00:04,464 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T08:00:04,465 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38961 2024-12-08T08:00:04,466 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38961 connecting to ZooKeeper ensemble=127.0.0.1:64925 2024-12-08T08:00:04,467 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389610x0, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T08:00:04,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38961-0x1000470676f0001 connected 2024-12-08T08:00:04,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:00:04,484 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T08:00:04,484 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T08:00:04,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T08:00:04,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T08:00:04,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38961 2024-12-08T08:00:04,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38961 2024-12-08T08:00:04,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38961 2024-12-08T08:00:04,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38961 2024-12-08T08:00:04,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38961 2024-12-08T08:00:04,497 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:40401 2024-12-08T08:00:04,497 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:00:04,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:00:04,504 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T08:00:04,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,515 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T08:00:04,515 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,40401,1733644804285 from backup master directory 2024-12-08T08:00:04,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:00:04,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:00:04,525 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T08:00:04,525 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,529 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/hbase.id] with ID: 5bf73dc0-56d3-474b-81c6-4b8feb611e99 2024-12-08T08:00:04,529 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/.tmp/hbase.id 2024-12-08T08:00:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741826_1002 (size=42) 2024-12-08T08:00:04,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741826_1002 (size=42) 2024-12-08T08:00:04,535 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/.tmp/hbase.id]:[hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/hbase.id] 2024-12-08T08:00:04,545 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:04,545 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T08:00:04,546 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T08:00:04,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741827_1003 (size=196) 2024-12-08T08:00:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741827_1003 (size=196) 2024-12-08T08:00:04,564 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T08:00:04,565 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T08:00:04,565 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T08:00:04,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741828_1004 (size=1189) 2024-12-08T08:00:04,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741828_1004 (size=1189) 2024-12-08T08:00:04,572 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store 2024-12-08T08:00:04,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741829_1005 (size=34) 2024-12-08T08:00:04,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741829_1005 (size=34) 2024-12-08T08:00:04,577 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:04,578 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T08:00:04,578 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:00:04,578 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:00:04,578 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T08:00:04,578 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:00:04,578 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T08:00:04,578 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644804577Disabling compacts and flushes for region at 1733644804577Disabling writes for close at 1733644804578 (+1 ms)Writing region close event to WAL at 1733644804578Closed at 1733644804578 2024-12-08T08:00:04,578 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/.initializing 2024-12-08T08:00:04,578 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/WALs/0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,581 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C40401%2C1733644804285, suffix=, logDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/WALs/0106a245d0e8,40401,1733644804285, archiveDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/oldWALs, maxLogs=10 2024-12-08T08:00:04,581 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40401%2C1733644804285.1733644804581 2024-12-08T08:00:04,585 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/WALs/0106a245d0e8,40401,1733644804285/0106a245d0e8%2C40401%2C1733644804285.1733644804581 2024-12-08T08:00:04,587 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:44551:44551)] 2024-12-08T08:00:04,587 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T08:00:04,587 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:04,587 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,587 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T08:00:04,590 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:04,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T08:00:04,591 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:00:04,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T08:00:04,592 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:00:04,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T08:00:04,594 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:00:04,594 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,595 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,595 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,596 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,596 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,597 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T08:00:04,598 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:00:04,600 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T08:00:04,600 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728439, jitterRate=-0.07374235987663269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T08:00:04,601 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644804588Initializing all the Stores at 1733644804588Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644804588Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644804588Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644804588Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644804588Cleaning up temporary data from old regions at 1733644804596 (+8 ms)Region opened successfully at 1733644804601 (+5 ms) 2024-12-08T08:00:04,601 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T08:00:04,603 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@284a03d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T08:00:04,604 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T08:00:04,604 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T08:00:04,604 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T08:00:04,604 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T08:00:04,605 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T08:00:04,605 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T08:00:04,605 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T08:00:04,607 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T08:00:04,608 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T08:00:04,620 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T08:00:04,620 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T08:00:04,621 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T08:00:04,630 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T08:00:04,631 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T08:00:04,632 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T08:00:04,641 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T08:00:04,642 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T08:00:04,651 DEBUG 
[master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T08:00:04,653 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T08:00:04,662 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T08:00:04,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T08:00:04,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T08:00:04,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,673 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,40401,1733644804285, sessionid=0x1000470676f0000, setting cluster-up flag (Was=false) 2024-12-08T08:00:04,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,725 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T08:00:04,726 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:04,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:00:04,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:00:04,778 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T08:00:04,779 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,40401,1733644804285 2024-12-08T08:00:04,780 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T08:00:04,781 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T08:00:04,781 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T08:00:04,781 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-08T08:00:04,782 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,40401,1733644804285 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T08:00:04,783 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644834785 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T08:00:04,785 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T08:00:04,785 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T08:00:04,785 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T08:00:04,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T08:00:04,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T08:00:04,786 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644804786,5,FailOnTimeoutGroup] 2024-12-08T08:00:04,786 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644804786,5,FailOnTimeoutGroup] 2024-12-08T08:00:04,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T08:00:04,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,786 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T08:00:04,787 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,787 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T08:00:04,788 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(746): ClusterId : 5bf73dc0-56d3-474b-81c6-4b8feb611e99 2024-12-08T08:00:04,788 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T08:00:04,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741831_1007 (size=1321) 2024-12-08T08:00:04,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741831_1007 (size=1321) 2024-12-08T08:00:04,793 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T08:00:04,793 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a 2024-12-08T08:00:04,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741832_1008 (size=32) 2024-12-08T08:00:04,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741832_1008 (size=32) 2024-12-08T08:00:04,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:04,799 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T08:00:04,799 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T08:00:04,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T08:00:04,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T08:00:04,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:04,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T08:00:04,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T08:00:04,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:04,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T08:00:04,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T08:00:04,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:04,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T08:00:04,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T08:00:04,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:04,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:04,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T08:00:04,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740 2024-12-08T08:00:04,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740 2024-12-08T08:00:04,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T08:00:04,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T08:00:04,807 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T08:00:04,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T08:00:04,810 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T08:00:04,810 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T08:00:04,810 DEBUG [RS:0;0106a245d0e8:38961 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a9eca0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T08:00:04,811 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728304, jitterRate=-0.07391482591629028}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T08:00:04,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644804799Initializing all the Stores at 1733644804799Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644804799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644804800 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644804800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644804800Cleaning up temporary data from old regions at 1733644804807 (+7 ms)Region opened successfully at 1733644804811 (+4 ms) 2024-12-08T08:00:04,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T08:00:04,811 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T08:00:04,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T08:00:04,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-12-08T08:00:04,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T08:00:04,812 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T08:00:04,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644804811Disabling compacts and flushes for region at 1733644804811Disabling writes for close at 1733644804811Writing region close event to WAL at 1733644804812 (+1 ms)Closed at 1733644804812 2024-12-08T08:00:04,840 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T08:00:04,840 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T08:00:04,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T08:00:04,841 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T08:00:04,842 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T08:00:04,844 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:38961 2024-12-08T08:00:04,844 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T08:00:04,844 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T08:00:04,844 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(832): About to register with Master. 
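The regionserver.ShutdownHook(81) entry above records a JVM shutdown hook being installed for RS:0 so the process can close down cleanly if it is terminated. Mechanically this is the standard java.lang.Runtime facility; a generic sketch follows, where the hook body is purely illustrative and is not HBase's actual shutdown logic:

    // Generic illustration of installing a named JVM shutdown hook, analogous to the
    // "Installed shutdown hook thread: Shutdownhook:RS:0;..." entry above.
    public class ShutdownHookSketch {
        public static void main(String[] args) {
            Thread hook = new Thread(() -> {
                // Placeholder cleanup; HBase's real hook closes the region server and filesystem.
                System.out.println("shutting down cleanly");
            }, "Shutdownhook:RS:0");
            Runtime.getRuntime().addShutdownHook(hook);
        }
    }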
2024-12-08T08:00:04,845 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,40401,1733644804285 with port=38961, startcode=1733644804464 2024-12-08T08:00:04,845 DEBUG [RS:0;0106a245d0e8:38961 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T08:00:04,847 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38661, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T08:00:04,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40401 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,38961,1733644804464 2024-12-08T08:00:04,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40401 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:04,849 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a 2024-12-08T08:00:04,849 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38901 2024-12-08T08:00:04,849 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T08:00:04,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T08:00:04,858 DEBUG [RS:0;0106a245d0e8:38961 {}] zookeeper.ZKUtil(111): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,38961,1733644804464 2024-12-08T08:00:04,858 WARN [RS:0;0106a245d0e8:38961 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T08:00:04,858 INFO [RS:0;0106a245d0e8:38961 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T08:00:04,858 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464 2024-12-08T08:00:04,858 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,38961,1733644804464] 2024-12-08T08:00:04,861 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T08:00:04,863 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T08:00:04,863 INFO [RS:0;0106a245d0e8:38961 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T08:00:04,864 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
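The MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M, which is consistent with the low mark being 95% of the limit (the documented default for hbase.regionserver.global.memstore.size.lower.limit, as far as I know). The PressureAwareCompactionThroughputController line likewise shows 100 MB/s and 50 MB/s bounds with a 60000 ms tuning period. A small hedged sketch of the low-mark arithmetic:

    // Hedged sketch: low-water mark as 95% of the global memstore limit.
    // The 0.95 factor is assumed to be the default lower-limit fraction.
    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            long globalLimitMb = 880;                 // logged globalMemStoreLimit
            double lowerLimitFraction = 0.95;         // assumed default fraction
            long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
            System.out.println(lowMarkMb);            // 836, matching globalMemStoreLimitLowMark
        }
    }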
2024-12-08T08:00:04,864 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T08:00:04,864 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T08:00:04,864 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T08:00:04,865 DEBUG [RS:0;0106a245d0e8:38961 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T08:00:04,866 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
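The ExecutorService(95) and ChoreService(168) entries above show the region server creating a set of fixed-size worker pools (corePoolSize equal to maxPoolSize) and scheduling periodic chores such as CompactionChecker at a 1000 ms period. A generic JDK sketch of the same two patterns, with illustrative names and placeholder task bodies rather than HBase's internal classes:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Generic illustration of a fixed single-thread worker pool plus a periodic
    // "chore", mirroring the corePoolSize=1/maxPoolSize=1 executors and the
    // CompactionChecker period=1000 ms entries above.
    public class ChoreSketch {
        public static void main(String[] args) {
            ExecutorService openRegionPool = Executors.newFixedThreadPool(1); // like RS_OPEN_REGION (core=max=1)
            openRegionPool.submit(() -> System.out.println("open region task"));
            openRegionPool.shutdown();                                        // let submitted work finish

            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
            chores.scheduleAtFixedRate(
                () -> System.out.println("compaction check"),                 // placeholder chore body
                0, 1000, TimeUnit.MILLISECONDS);                              // period=1000, MILLISECONDS
            // The scheduled pool keeps the "chore" running until it is shut down.
        }
    }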
2024-12-08T08:00:04,866 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,866 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,866 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,866 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,866 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38961,1733644804464-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T08:00:04,878 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T08:00:04,879 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,38961,1733644804464-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,879 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,879 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.Replication(171): 0106a245d0e8,38961,1733644804464 started 2024-12-08T08:00:04,892 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:04,892 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,38961,1733644804464, RpcServer on 0106a245d0e8/172.17.0.2:38961, sessionid=0x1000470676f0001 2024-12-08T08:00:04,892 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T08:00:04,892 DEBUG [RS:0;0106a245d0e8:38961 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,38961,1733644804464 2024-12-08T08:00:04,892 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,38961,1733644804464' 2024-12-08T08:00:04,892 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T08:00:04,893 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T08:00:04,893 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T08:00:04,893 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T08:00:04,893 DEBUG [RS:0;0106a245d0e8:38961 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,38961,1733644804464 2024-12-08T08:00:04,893 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,38961,1733644804464' 2024-12-08T08:00:04,893 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T08:00:04,893 DEBUG 
[RS:0;0106a245d0e8:38961 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T08:00:04,894 DEBUG [RS:0;0106a245d0e8:38961 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T08:00:04,894 INFO [RS:0;0106a245d0e8:38961 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T08:00:04,894 INFO [RS:0;0106a245d0e8:38961 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T08:00:04,993 WARN [0106a245d0e8:40401 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T08:00:04,995 INFO [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C38961%2C1733644804464, suffix=, logDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464, archiveDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/oldWALs, maxLogs=32 2024-12-08T08:00:04,996 INFO [RS:0;0106a245d0e8:38961 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C38961%2C1733644804464.1733644804996 2024-12-08T08:00:05,001 INFO [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644804996 2024-12-08T08:00:05,004 DEBUG [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44551:44551),(127.0.0.1/127.0.0.1:37403:37403)] 2024-12-08T08:00:05,243 DEBUG [0106a245d0e8:40401 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T08:00:05,243 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:05,244 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,38961,1733644804464, state=OPENING 2024-12-08T08:00:05,293 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T08:00:05,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:05,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:00:05,305 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:00:05,305 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T08:00:05,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=0106a245d0e8,38961,1733644804464}] 2024-12-08T08:00:05,305 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:00:05,457 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T08:00:05,459 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58119, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T08:00:05,462 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T08:00:05,462 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T08:00:05,463 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C38961%2C1733644804464.meta, suffix=.meta, logDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464, archiveDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/oldWALs, maxLogs=32 2024-12-08T08:00:05,464 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C38961%2C1733644804464.meta.1733644805464.meta 2024-12-08T08:00:05,472 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.meta.1733644805464.meta 2024-12-08T08:00:05,473 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:44551:44551)] 2024-12-08T08:00:05,473 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T08:00:05,473 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T08:00:05,473 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T08:00:05,473 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
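The CoprocessorHost entries above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor (HTD) as the region opens. For a user table, a coprocessor is normally attached the same way, by naming its class in the descriptor; a hedged sketch using the public client API, where "ExampleTable" is illustrative and the endpoint class is the one named in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Hedged sketch: attaching a coprocessor to a table descriptor so region servers
    // load it from the HTD at region open, as seen for hbase:meta above.
    public class CoprocessorSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("ExampleTable"))               // illustrative table name
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                    .build();
                admin.createTable(td);
            }
        }
    }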
2024-12-08T08:00:05,473 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T08:00:05,474 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:05,474 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T08:00:05,474 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T08:00:05,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T08:00:05,475 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T08:00:05,475 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:05,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:05,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T08:00:05,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T08:00:05,476 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:05,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:05,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T08:00:05,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T08:00:05,478 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:05,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:00:05,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T08:00:05,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T08:00:05,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:05,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
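The CompactionConfiguration(183) entries repeated above for each family all report the same selection parameters: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000 and an off-peak ratio of 5.000000. These appear to correspond to the standard compaction tuning keys; a hedged sketch of setting them on a Configuration, where the key names are believed to be the standard ones and should be verified against the hbase-default.xml of the version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hedged sketch of the compaction-selection knobs reported above; values mirror the log.
    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
            System.out.println(conf.get("hbase.hstore.compaction.ratio"));
        }
    }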
2024-12-08T08:00:05,479 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T08:00:05,480 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740 2024-12-08T08:00:05,481 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740 2024-12-08T08:00:05,482 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T08:00:05,482 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T08:00:05,482 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T08:00:05,484 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T08:00:05,484 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840823, jitterRate=0.06916238367557526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T08:00:05,484 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T08:00:05,485 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644805474Writing region info on filesystem at 1733644805474Initializing all the Stores at 1733644805474Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644805474Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644805475 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644805475Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644805475Cleaning up temporary data from old regions at 1733644805482 (+7 ms)Running coprocessor post-open hooks at 1733644805484 (+2 ms)Region opened successfully at 1733644805485 (+1 ms) 2024-12-08T08:00:05,486 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644805457 2024-12-08T08:00:05,488 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T08:00:05,488 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T08:00:05,489 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:05,489 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,38961,1733644804464, state=OPEN 2024-12-08T08:00:05,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T08:00:05,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T08:00:05,535 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:05,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:00:05,535 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:00:05,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T08:00:05,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,38961,1733644804464 in 230 msec 2024-12-08T08:00:05,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T08:00:05,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 698 msec 2024-12-08T08:00:05,541 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T08:00:05,541 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T08:00:05,542 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T08:00:05,542 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,38961,1733644804464, seqNum=-1] 2024-12-08T08:00:05,543 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T08:00:05,544 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32991, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T08:00:05,549 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 767 msec 2024-12-08T08:00:05,550 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644805549, completionTime=-1 2024-12-08T08:00:05,550 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T08:00:05,550 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T08:00:05,552 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T08:00:05,552 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644865552 2024-12-08T08:00:05,552 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733644925552 2024-12-08T08:00:05,552 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T08:00:05,553 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40401,1733644804285-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:05,553 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40401,1733644804285-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:05,553 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40401,1733644804285-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:05,553 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:40401, period=300000, unit=MILLISECONDS is enabled. 
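The ConnectionUtils entries above show a client-side fetch of the hbase:meta location from the connection registry, returning [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,38961,...]. From application code the same lookup is normally done through a RegionLocator; a hedged sketch, which simply prints whichever server currently hosts meta:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Hedged sketch: asking the client API where hbase:meta is currently hosted,
    // the user-level equivalent of the registry fetch logged above.
    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
                System.out.println(loc.getHostname() + ":" + loc.getPort());
            }
        }
    }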
2024-12-08T08:00:05,553 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:05,553 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T08:00:05,555 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.032sec 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40401,1733644804285-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T08:00:05,557 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40401,1733644804285-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T08:00:05,559 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T08:00:05,559 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T08:00:05,559 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40401,1733644804285-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
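With the master's balancer initialization complete above, the test turns the load balancer off before creating its table (the "Client=null/null set balanceSwitch=false" request appears a little further down). From client code that switch is flipped through the Admin API; a hedged sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hedged sketch: turning the balancer off via the Admin API, the client-side
    // counterpart of the "set balanceSwitch=false" request the master logs below.
    public class BalancerSwitchSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                boolean previous = admin.balancerSwitch(false, false); // off; false = don't wait for in-flight runs
                System.out.println("balancer was previously on: " + previous);
            }
        }
    }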
2024-12-08T08:00:05,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b2a39d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T08:00:05,589 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,40401,-1 for getting cluster id 2024-12-08T08:00:05,589 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T08:00:05,590 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5bf73dc0-56d3-474b-81c6-4b8feb611e99' 2024-12-08T08:00:05,590 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T08:00:05,591 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5bf73dc0-56d3-474b-81c6-4b8feb611e99" 2024-12-08T08:00:05,591 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54b5ba0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T08:00:05,591 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,40401,-1] 2024-12-08T08:00:05,591 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T08:00:05,591 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:00:05,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,592 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47440, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T08:00:05,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ceb7ecd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T08:00:05,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,593 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T08:00:05,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,594 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,38961,1733644804464, seqNum=-1] 2024-12-08T08:00:05,595 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T08:00:05,596 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44078, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T08:00:05,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0106a245d0e8,40401,1733644804285 2024-12-08T08:00:05,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:00:05,600 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T08:00:05,601 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T08:00:05,602 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 0106a245d0e8,40401,1733644804285 2024-12-08T08:00:05,602 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c455bd6 2024-12-08T08:00:05,602 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T08:00:05,603 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T08:00:05,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T08:00:05,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
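The two TableDescriptorChecker warnings above flag that the test deliberately creates its table with a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes, log rolls and splits happen quickly. A hedged sketch of creating such a table through the public client API; the table and family names mirror the create request that follows, and the descriptor setters are the standard ones to the best of my knowledge:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Hedged sketch: a table with deliberately small file and flush sizes, which is
    // what triggers the MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings seen above.
    public class SmallSizesTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .setMaxFileSize(786432L)         // MAX_FILESIZE from the warning above
                    .setMemStoreFlushSize(8192L)     // MEMSTORE_FLUSHSIZE from the warning above
                    .build();
                admin.createTable(td);
            }
        }
    }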
2024-12-08T08:00:05,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T08:00:05,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-08T08:00:05,607 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T08:00:05,607 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:05,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-08T08:00:05,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T08:00:05,608 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T08:00:05,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:05,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741835_1011 (size=381) 2024-12-08T08:00:05,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741835_1011 (size=381) 2024-12-08T08:00:05,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:05,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:06,034 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3926904d5898a944bd497c60411add81, NAME => 'TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a 2024-12-08T08:00:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741836_1012 (size=64) 2024-12-08T08:00:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741836_1012 (size=64) 2024-12-08T08:00:06,041 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:06,041 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3926904d5898a944bd497c60411add81, disabling compactions & flushes 2024-12-08T08:00:06,041 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:06,041 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:06,041 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. after waiting 0 ms 2024-12-08T08:00:06,041 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:06,041 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 
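The two stack traces above come from RecoverLeaseFSUtils trying to close out WAL files from an earlier mini-cluster: it reflectively calls DistributedFileSystem.isFileClosed(...) and fails with "Filesystem closed", apparently because the DFS client those older WALs were written through has already been shut down. The HDFS calls it wraps are public API; a hedged sketch of a simple recover-and-wait loop, where the path and sleep interval are illustrative and this is not HBase's actual retry logic:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Hedged sketch of WAL lease recovery against HDFS: ask the NameNode to recover
    // the lease, then poll isFileClosed() until the file is safe to read.
    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path wal = new Path("/hbase/WALs/example-wal");          // illustrative path
            FileSystem fs = wal.getFileSystem(conf);
            if (fs instanceof DistributedFileSystem) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                boolean recovered = dfs.recoverLease(wal);           // true if the lease is already released
                while (!recovered && !dfs.isFileClosed(wal)) {
                    Thread.sleep(1000);                              // wait and re-check
                    recovered = dfs.recoverLease(wal);
                }
            }
        }
    }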
2024-12-08T08:00:06,041 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3926904d5898a944bd497c60411add81: Waiting for close lock at 1733644806041Disabling compacts and flushes for region at 1733644806041Disabling writes for close at 1733644806041Writing region close event to WAL at 1733644806041Closed at 1733644806041 2024-12-08T08:00:06,042 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T08:00:06,042 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733644806042"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644806042"}]},"ts":"1733644806042"} 2024-12-08T08:00:06,044 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T08:00:06,045 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T08:00:06,046 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644806045"}]},"ts":"1733644806045"} 2024-12-08T08:00:06,048 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-08T08:00:06,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, ASSIGN}] 2024-12-08T08:00:06,049 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, ASSIGN 2024-12-08T08:00:06,050 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, ASSIGN; state=OFFLINE, location=0106a245d0e8,38961,1733644804464; forceNewPlan=false, retain=false 2024-12-08T08:00:06,129 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T08:00:06,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:06,201 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3926904d5898a944bd497c60411add81, regionState=OPENING, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:06,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, ASSIGN because future has completed 2024-12-08T08:00:06,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464}] 2024-12-08T08:00:06,358 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:06,358 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3926904d5898a944bd497c60411add81, NAME => 'TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.', STARTKEY => '', ENDKEY => ''} 2024-12-08T08:00:06,359 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,359 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:06,359 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,359 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,360 INFO [StoreOpener-3926904d5898a944bd497c60411add81-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,361 INFO [StoreOpener-3926904d5898a944bd497c60411add81-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3926904d5898a944bd497c60411add81 columnFamilyName info 2024-12-08T08:00:06,361 DEBUG [StoreOpener-3926904d5898a944bd497c60411add81-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:06,361 INFO [StoreOpener-3926904d5898a944bd497c60411add81-1 {}] regionserver.HStore(327): Store=3926904d5898a944bd497c60411add81/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:00:06,361 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,362 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,362 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,363 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,363 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,364 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,365 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T08:00:06,366 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3926904d5898a944bd497c60411add81; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849456, jitterRate=0.0801394134759903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T08:00:06,366 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3926904d5898a944bd497c60411add81 2024-12-08T08:00:06,366 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3926904d5898a944bd497c60411add81: Running coprocessor pre-open hook at 1733644806359Writing region info on filesystem at 1733644806359Initializing all the Stores at 1733644806359Instantiating store for column family {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644806359Cleaning up temporary data from old regions at 1733644806363 (+4 ms)Running coprocessor post-open hooks at 1733644806366 (+3 ms)Region opened successfully at 1733644806366 2024-12-08T08:00:06,367 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., pid=6, masterSystemTime=1733644806355 2024-12-08T08:00:06,369 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:06,369 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:06,369 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3926904d5898a944bd497c60411add81, regionState=OPEN, openSeqNum=2, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:06,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 because future has completed 2024-12-08T08:00:06,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T08:00:06,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 in 169 msec 2024-12-08T08:00:06,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T08:00:06,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, ASSIGN in 326 msec 2024-12-08T08:00:06,378 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T08:00:06,378 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733644806378"}]},"ts":"1733644806378"} 2024-12-08T08:00:06,380 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-08T08:00:06,382 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T08:00:06,383 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 778 msec 2024-12-08T08:00:06,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:06,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:07,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:07,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:08,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-08T08:00:08,168 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T08:00:08,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T08:00:08,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:08,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:09,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:09,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:10,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:10,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:10,861 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T08:00:10,862 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-08T08:00:11,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:11,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:11,861 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T08:00:11,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:11,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:12,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:12,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:13,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:13,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:14,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:14,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:15,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40401 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T08:00:15,631 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-08T08:00:15,631 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-08T08:00:15,634 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-08T08:00:15,634 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 
2024-12-08T08:00:15,636 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2] 2024-12-08T08:00:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 3926904d5898a944bd497c60411add81 2024-12-08T08:00:15,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3926904d5898a944bd497c60411add81 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T08:00:15,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T08:00:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44078 deadline: 1733644825696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:15,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/460c8ccea6da43798f787c106818d2f2 is 1080, key is row0001/info:/1733644815637/Put/seqid=0 2024-12-08T08:00:15,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741837_1013 (size=12509) 2024-12-08T08:00:15,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741837_1013 (size=12509) 2024-12-08T08:00:15,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/460c8ccea6da43798f787c106818d2f2 2024-12-08T08:00:15,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/460c8ccea6da43798f787c106818d2f2 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/460c8ccea6da43798f787c106818d2f2 2024-12-08T08:00:15,725 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T08:00:15,726 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T08:00:15,726 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 because the exception is null or not the one we care about 2024-12-08T08:00:15,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/460c8ccea6da43798f787c106818d2f2, entries=7, sequenceid=11, filesize=12.2 K 2024-12-08T08:00:15,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 3926904d5898a944bd497c60411add81 in 75ms, sequenceid=11, compaction requested=false 2024-12-08T08:00:15,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3926904d5898a944bd497c60411add81: 2024-12-08T08:00:15,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:15,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:16,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:16,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:17,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:17,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:18,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:18,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:19,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:19,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:20,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:20,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:21,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:21,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:22,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:22,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-08T08:00:23,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:00:23,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:00:24,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:00:24,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:00:25,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:00:25,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 3926904d5898a944bd497c60411add81
2024-12-08T08:00:25,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3926904d5898a944bd497c60411add81 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-08T08:00:25,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
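The repeated WARN entries above come from a lease-recovery retry loop: the WAL close path asks HDFS to recover the lease on the old writer's file and then polls until the NameNode reports the file closed, and every poll here fails with "Filesystem closed" because the DFSClient behind that FileSystem has already been shut down. The following is a minimal sketch of that polling pattern, not HBase's actual RecoverLeaseFSUtils (which goes through reflection); the DistributedFileSystem handle, timeout, and 1-second interval are assumptions for illustration.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL file, then poll until it is closed.
      // Returns true once the file is reported closed, false if the deadline passes first.
      static boolean recoverWalLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean closed = dfs.recoverLease(wal);   // kick off lease recovery on the NameNode
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L);                    // the WARNs above repeat about once per second
          closed = dfs.isFileClosed(wal);         // throws IOException("Filesystem closed") if dfs was closed
        }
        return closed;
      }
    }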
2024-12-08T08:00:25,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/989b106ad05b4110aefe18fb8e337a69 is 1080, key is row0008/info:/1733644815653/Put/seqid=0
2024-12-08T08:00:25,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741838_1014 (size=29761)
2024-12-08T08:00:25,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741838_1014 (size=29761)
2024-12-08T08:00:25,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/989b106ad05b4110aefe18fb8e337a69
2024-12-08T08:00:25,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/989b106ad05b4110aefe18fb8e337a69 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69
2024-12-08T08:00:25,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69, entries=23, sequenceid=37, filesize=29.1 K
2024-12-08T08:00:25,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 3926904d5898a944bd497c60411add81 in 33ms, sequenceid=37, compaction requested=false
2024-12-08T08:00:25,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3926904d5898a944bd497c60411add81:
2024-12-08T08:00:25,799 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-12-08T08:00:25,799 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T08:00:25,799 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69 because midkey is the same as first or last row
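The split-policy DEBUG lines above reduce to two checks: the summed size of the store's files must exceed sizeToCheck, and the candidate file's midkey must differ from its first and last row, otherwise no split point exists. A rough sketch of those two guards, with method and parameter names invented for illustration rather than taken from the HBase code:

    import java.util.Arrays;

    public class SplitCheckSketch {
      // Size guard: split only when the summed store file size exceeds the threshold,
      // e.g. 41.3 K > 16.0 K in the entries above.
      static boolean bigEnoughToSplit(long[] storeFileSizes, long sizeToCheck) {
        return Arrays.stream(storeFileSizes).sum() > sizeToCheck;
      }

      // Midkey guard: a file whose midkey equals its first or last key cannot be split,
      // which is why the log still reports "cannot split" even though the size check passed.
      static boolean splittable(byte[] midkey, byte[] firstKey, byte[] lastKey) {
        return !Arrays.equals(midkey, firstKey) && !Arrays.equals(midkey, lastKey);
      }
    }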
2024-12-08T08:00:26,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:00:26,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:00:27,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:00:27,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:00:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 3926904d5898a944bd497c60411add81
2024-12-08T08:00:27,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3926904d5898a944bd497c60411add81 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-08T08:00:27,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/440054de6f8d4baeaeea099375b7b568 is 1080, key is row0031/info:/1733644825769/Put/seqid=0
2024-12-08T08:00:27,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741839_1015 (size=12509)
2024-12-08T08:00:27,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741839_1015 (size=12509)
2024-12-08T08:00:27,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/440054de6f8d4baeaeea099375b7b568
2024-12-08T08:00:27,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/440054de6f8d4baeaeea099375b7b568 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/440054de6f8d4baeaeea099375b7b568
2024-12-08T08:00:27,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/440054de6f8d4baeaeea099375b7b568, entries=7, sequenceid=47, filesize=12.2 K
2024-12-08T08:00:27,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 3926904d5898a944bd497c60411add81 in 25ms, sequenceid=47, compaction requested=true
2024-12-08T08:00:27,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3926904d5898a944bd497c60411add81:
2024-12-08T08:00:27,811 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K
2024-12-08T08:00:27,811 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T08:00:27,811 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69 because midkey is the same as first or last row
2024-12-08T08:00:27,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3926904d5898a944bd497c60411add81:info, priority=-2147483648, current under compaction store
size is 1 2024-12-08T08:00:27,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:27,812 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T08:00:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 3926904d5898a944bd497c60411add81 2024-12-08T08:00:27,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3926904d5898a944bd497c60411add81 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T08:00:27,813 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T08:00:27,813 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 3926904d5898a944bd497c60411add81/info is initiating minor compaction (all files) 2024-12-08T08:00:27,813 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3926904d5898a944bd497c60411add81/info in TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:27,813 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/460c8ccea6da43798f787c106818d2f2, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/440054de6f8d4baeaeea099375b7b568] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp, totalSize=53.5 K 2024-12-08T08:00:27,813 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 460c8ccea6da43798f787c106818d2f2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733644815637 2024-12-08T08:00:27,814 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 989b106ad05b4110aefe18fb8e337a69, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733644815653 2024-12-08T08:00:27,814 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 440054de6f8d4baeaeea099375b7b568, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733644825769 2024-12-08T08:00:27,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/7513698ad9364fcba8ea21f8c852821e is 1080, key is row0038/info:/1733644827787/Put/seqid=0 
2024-12-08T08:00:27,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741840_1016 (size=17894) 2024-12-08T08:00:27,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741840_1016 (size=17894) 2024-12-08T08:00:27,829 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3926904d5898a944bd497c60411add81#info#compaction#59 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:00:27,829 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/1b408f83d0df41d7bd3168e4981c5e2c is 1080, key is row0001/info:/1733644815637/Put/seqid=0 2024-12-08T08:00:27,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741841_1017 (size=44978) 2024-12-08T08:00:27,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741841_1017 (size=44978) 2024-12-08T08:00:27,842 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/1b408f83d0df41d7bd3168e4981c5e2c as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c 2024-12-08T08:00:27,850 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3926904d5898a944bd497c60411add81/info of 3926904d5898a944bd497c60411add81 into 1b408f83d0df41d7bd3168e4981c5e2c(size=43.9 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
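The minor compaction above selected three store files whose sizes are consistent with the block sizes reported earlier (12,509 + 29,761 + 12,509 bytes), which is the 54,779-byte total, about 53.5 K, that the policy logged, and rewrote them into a single 44,978-byte (43.9 K) file. A small self-contained check of that bookkeeping, using only the numbers taken from the entries above:

    public class CompactionSelectionSketch {
      public static void main(String[] args) {
        // Sizes of the three selected store files, consistent with the sizes reported above.
        long[] selected = {12509L, 29761L, 12509L};
        long total = 0;
        for (long size : selected) {
          total += size;
        }
        // Prints 54779, matching "selected 3 files of size 54779" in the policy DEBUG line.
        System.out.println(total);
        // The rewritten output file is 44978 bytes, i.e. about 43.9 K as logged.
        System.out.printf("%.1f K%n", 44978 / 1024.0);
      }
    }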
2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3926904d5898a944bd497c60411add81: 2024-12-08T08:00:27,850 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., storeName=3926904d5898a944bd497c60411add81/info, priority=13, startTime=1733644827811; duration=0sec 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c because midkey is the same as first or last row 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c because midkey is the same as first or last row 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c because midkey is the same as first or last row 2024-12-08T08:00:27,850 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:27,851 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3926904d5898a944bd497c60411add81:info 2024-12-08T08:00:28,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/7513698ad9364fcba8ea21f8c852821e 2024-12-08T08:00:28,236 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/7513698ad9364fcba8ea21f8c852821e as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/7513698ad9364fcba8ea21f8c852821e
2024-12-08T08:00:28,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/7513698ad9364fcba8ea21f8c852821e, entries=12, sequenceid=62, filesize=17.5 K
2024-12-08T08:00:28,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for 3926904d5898a944bd497c60411add81 in 431ms, sequenceid=62, compaction requested=false
2024-12-08T08:00:28,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3926904d5898a944bd497c60411add81:
2024-12-08T08:00:28,243 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K
2024-12-08T08:00:28,243 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-08T08:00:28,244 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c because midkey is the same as first or last row
2024-12-08T08:00:28,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:00:28,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:00:29,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:00:29,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
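Shortly below, a put is rejected with RegionTooBusyException because the region's memstore has grown past its blocking limit (32.0 K here) while flushes are still catching up. The following is a toy version of that resource check, with the limit passed in explicitly; in a real deployment the threshold is derived from configuration, which is not shown in this log.

    import org.apache.hadoop.hbase.RegionTooBusyException;

    public class MemStoreGuardSketch {
      // Reject the write once the memstore exceeds the blocking threshold; callers are
      // expected to back off and retry, which is what the client-side DEBUG entries below show.
      static void checkResources(long memStoreSizeBytes, long blockingLimitBytes)
          throws RegionTooBusyException {
        if (memStoreSizeBytes > blockingLimitBytes) {
          throw new RegionTooBusyException(
              "Over memstore limit=" + blockingLimitBytes + " bytes");
        }
      }
    }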
2024-12-08T08:00:29,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 3926904d5898a944bd497c60411add81
2024-12-08T08:00:29,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3926904d5898a944bd497c60411add81 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-12-08T08:00:29,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/d74246b5952749d396e615db0b5de9c7 is 1080, key is row0050/info:/1733644827814/Put/seqid=0
2024-12-08T08:00:29,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741842_1018 (size=22222)
2024-12-08T08:00:29,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741842_1018 (size=22222)
2024-12-08T08:00:29,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T08:00:29,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44078 deadline: 1733644839881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:29,882 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T08:00:29,882 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T08:00:29,882 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 because the exception is null or not the one we care about 2024-12-08T08:00:30,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/d74246b5952749d396e615db0b5de9c7 2024-12-08T08:00:30,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/d74246b5952749d396e615db0b5de9c7 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d74246b5952749d396e615db0b5de9c7 2024-12-08T08:00:30,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d74246b5952749d396e615db0b5de9c7, entries=16, sequenceid=82, filesize=21.7 K 2024-12-08T08:00:30,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 3926904d5898a944bd497c60411add81 in 487ms, sequenceid=82, compaction requested=true 2024-12-08T08:00:30,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3926904d5898a944bd497c60411add81: 2024-12-08T08:00:30,339 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-08T08:00:30,340 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:30,340 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c because midkey is the same as first or last row 2024-12-08T08:00:30,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3926904d5898a944bd497c60411add81:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:00:30,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:30,340 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T08:00:30,342 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T08:00:30,342 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 3926904d5898a944bd497c60411add81/info is initiating minor compaction (all files) 2024-12-08T08:00:30,342 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3926904d5898a944bd497c60411add81/info in TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:30,342 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/7513698ad9364fcba8ea21f8c852821e, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d74246b5952749d396e615db0b5de9c7] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp, totalSize=83.1 K 2024-12-08T08:00:30,343 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b408f83d0df41d7bd3168e4981c5e2c, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733644815637 2024-12-08T08:00:30,344 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7513698ad9364fcba8ea21f8c852821e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1733644827787 2024-12-08T08:00:30,344 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting d74246b5952749d396e615db0b5de9c7, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733644827814 2024-12-08T08:00:30,357 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3926904d5898a944bd497c60411add81#info#compaction#61 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:00:30,358 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/d1da942a97ca478a8ae863e3ba8ca8cd is 1080, key is row0001/info:/1733644815637/Put/seqid=0 2024-12-08T08:00:30,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741843_1019 (size=75378) 2024-12-08T08:00:30,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741843_1019 (size=75378) 2024-12-08T08:00:30,366 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/d1da942a97ca478a8ae863e3ba8ca8cd as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd 2024-12-08T08:00:30,371 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3926904d5898a944bd497c60411add81/info of 3926904d5898a944bd497c60411add81 into d1da942a97ca478a8ae863e3ba8ca8cd(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T08:00:30,371 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3926904d5898a944bd497c60411add81: 2024-12-08T08:00:30,371 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., storeName=3926904d5898a944bd497c60411add81/info, priority=13, startTime=1733644830340; duration=0sec 2024-12-08T08:00:30,371 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-08T08:00:30,371 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:30,372 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-08T08:00:30,372 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:30,372 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-08T08:00:30,372 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T08:00:30,373 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:30,373 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:30,373 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3926904d5898a944bd497c60411add81:info 2024-12-08T08:00:30,374 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40401 {}] assignment.AssignmentManager(1363): Split request from 0106a245d0e8,38961,1733644804464, parent={ENCODED => 3926904d5898a944bd497c60411add81, NAME => 'TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-08T08:00:30,378 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40401 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:30,381 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40401 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3926904d5898a944bd497c60411add81, daughterA=e3eb3f75a97f486e2692290569af9634, daughterB=91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:30,382 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3926904d5898a944bd497c60411add81, daughterA=e3eb3f75a97f486e2692290569af9634, daughterB=91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:30,382 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3926904d5898a944bd497c60411add81, daughterA=e3eb3f75a97f486e2692290569af9634, daughterB=91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:30,382 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3926904d5898a944bd497c60411add81, daughterA=e3eb3f75a97f486e2692290569af9634, daughterB=91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:30,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, UNASSIGN}] 2024-12-08T08:00:30,389 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, UNASSIGN 2024-12-08T08:00:30,391 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3926904d5898a944bd497c60411add81, regionState=CLOSING, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:30,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, UNASSIGN because future has completed 2024-12-08T08:00:30,393 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-08T08:00:30,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464}] 2024-12-08T08:00:30,553 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,553 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-08T08:00:30,554 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 3926904d5898a944bd497c60411add81, disabling compactions & flushes 2024-12-08T08:00:30,554 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:30,554 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:30,554 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. after waiting 0 ms 2024-12-08T08:00:30,555 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 
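The split decision above can be tied back to the numbers the split policy prints: the parent's single compacted store file is about 73.6 K (75378 bytes), while sizeToCheck is 16.0 K with regionsWithCommonTable=1 and initialSize=16384 (the latter appears in the region open journals further down). Below is a minimal sketch of that comparison, assuming the commonly described IncreasingToUpperBoundRegionSplitPolicy rule that the threshold is initialSize multiplied by the cube of the table's region count on the server, capped at the desired max file size; the class and method names are illustrative, not the actual HBase code, and the max-file-size value is borrowed from a daughter's open journal purely as a cap.

public class SplitCheckSketch {
    // Assumed rule (hedged): threshold = min(desiredMaxFileSize, initialSize * regionCount^3).
    static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
        long cubed = initialSize * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, cubed);
    }

    public static void main(String[] args) {
        long initialSize = 16_384L;          // "initialSize=16384" in the open journal
        long desiredMaxFileSize = 852_123L;  // cap taken from a daughter's open journal, illustrative only
        long sumSize = 75_378L;              // size of the ~73.6 K compacted store file (blk_1073741843)
        long threshold = sizeToCheck(initialSize, desiredMaxFileSize, 1);
        // Prints sumSize=75378, sizeToCheck=16384 (16.0 K), shouldSplit=true, matching the log.
        System.out.printf("sumSize=%d, sizeToCheck=%d, shouldSplit=%b%n",
                sumSize, threshold, sumSize > threshold);
    }
}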
2024-12-08T08:00:30,555 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 3926904d5898a944bd497c60411add81 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T08:00:30,562 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/a5994d318c114846a5eb502d37e1645f is 1080, key is row0066/info:/1733644829855/Put/seqid=0 2024-12-08T08:00:30,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741844_1020 (size=18987) 2024-12-08T08:00:30,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741844_1020 (size=18987) 2024-12-08T08:00:30,569 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/a5994d318c114846a5eb502d37e1645f 2024-12-08T08:00:30,575 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/.tmp/info/a5994d318c114846a5eb502d37e1645f as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/a5994d318c114846a5eb502d37e1645f 2024-12-08T08:00:30,580 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/a5994d318c114846a5eb502d37e1645f, entries=13, sequenceid=99, filesize=18.5 K 2024-12-08T08:00:30,581 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 3926904d5898a944bd497c60411add81 in 26ms, sequenceid=99, compaction requested=false 2024-12-08T08:00:30,582 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/460c8ccea6da43798f787c106818d2f2, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c, 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/440054de6f8d4baeaeea099375b7b568, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/7513698ad9364fcba8ea21f8c852821e, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d74246b5952749d396e615db0b5de9c7] to archive 2024-12-08T08:00:30,583 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T08:00:30,585 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/460c8ccea6da43798f787c106818d2f2 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/460c8ccea6da43798f787c106818d2f2 2024-12-08T08:00:30,586 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/989b106ad05b4110aefe18fb8e337a69 2024-12-08T08:00:30,587 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/1b408f83d0df41d7bd3168e4981c5e2c 2024-12-08T08:00:30,588 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/440054de6f8d4baeaeea099375b7b568 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/440054de6f8d4baeaeea099375b7b568 2024-12-08T08:00:30,589 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/7513698ad9364fcba8ea21f8c852821e to 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/7513698ad9364fcba8ea21f8c852821e 2024-12-08T08:00:30,590 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d74246b5952749d396e615db0b5de9c7 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d74246b5952749d396e615db0b5de9c7 2024-12-08T08:00:30,595 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/recovered.edits/102.seqid, newMaxSeqId=102, maxSeqId=1 2024-12-08T08:00:30,596 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 2024-12-08T08:00:30,596 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 3926904d5898a944bd497c60411add81: Waiting for close lock at 1733644830554Running coprocessor pre-close hooks at 1733644830554Disabling compacts and flushes for region at 1733644830554Disabling writes for close at 1733644830554Obtaining lock to block concurrent updates at 1733644830555 (+1 ms)Preparing flush snapshotting stores in 3926904d5898a944bd497c60411add81 at 1733644830555Finished memstore snapshotting TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., syncing WAL and waiting on mvcc, flushsize=dataSize=13988, getHeapSize=15216, getOffHeapSize=0, getCellsCount=13 at 1733644830555Flushing stores of TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 
at 1733644830557 (+2 ms)Flushing 3926904d5898a944bd497c60411add81/info: creating writer at 1733644830557Flushing 3926904d5898a944bd497c60411add81/info: appending metadata at 1733644830562 (+5 ms)Flushing 3926904d5898a944bd497c60411add81/info: closing flushed file at 1733644830562Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c66f5bf: reopening flushed file at 1733644830575 (+13 ms)Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 3926904d5898a944bd497c60411add81 in 26ms, sequenceid=99, compaction requested=false at 1733644830581 (+6 ms)Writing region close event to WAL at 1733644830592 (+11 ms)Running coprocessor post-close hooks at 1733644830595 (+3 ms)Closed at 1733644830595 2024-12-08T08:00:30,598 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,598 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3926904d5898a944bd497c60411add81, regionState=CLOSED 2024-12-08T08:00:30,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 because future has completed 2024-12-08T08:00:30,604 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-08T08:00:30,604 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 3926904d5898a944bd497c60411add81, server=0106a245d0e8,38961,1733644804464 in 208 msec 2024-12-08T08:00:30,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T08:00:30,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3926904d5898a944bd497c60411add81, UNASSIGN in 216 msec 2024-12-08T08:00:30,615 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:30,619 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=3926904d5898a944bd497c60411add81, threads=2 2024-12-08T08:00:30,621 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/a5994d318c114846a5eb502d37e1645f for region: 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,621 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd for region: 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,631 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/a5994d318c114846a5eb502d37e1645f, top=true 2024-12-08T08:00:30,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741845_1021 (size=27) 2024-12-08T08:00:30,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741845_1021 (size=27) 2024-12-08T08:00:30,636 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f for child: 91f928a887803a07c6b42721ad9e5c06, parent: 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,636 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/a5994d318c114846a5eb502d37e1645f for region: 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741846_1022 (size=27) 2024-12-08T08:00:30,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741846_1022 (size=27) 2024-12-08T08:00:30,642 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd for region: 3926904d5898a944bd497c60411add81 2024-12-08T08:00:30,644 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 3926904d5898a944bd497c60411add81 Daughter A: [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81] storefiles, Daughter B: [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81] storefiles. 
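The storefile split above copies no data: daughter A gets a reference file and daughter B gets a reference plus an HFileLink, all pointing back at the parent's files. Both naming patterns are visible verbatim in the paths logged above (reference = storefile name, a dot, the parent's encoded region name; link = table name, '=', parent region, '-', storefile name). The snippet below only re-assembles those strings for illustration and does not call the real HRegionFileSystem or HFileLink APIs.

public class SplitFileNamesSketch {
    public static void main(String[] args) {
        String table = "TestLogRolling-testLogRolling";
        String parentRegion = "3926904d5898a944bd497c60411add81";
        String flushedFile = "a5994d318c114846a5eb502d37e1645f";   // last flush before the close
        String compactedFile = "d1da942a97ca478a8ae863e3ba8ca8cd"; // result of the earlier compaction

        // Reference file placed in a daughter's info/ directory (top or bottom half of the parent file).
        String reference = compactedFile + "." + parentRegion;
        // HFileLink created for daughter B (the log marks this file top=true).
        String hfileLink = table + "=" + parentRegion + "-" + flushedFile;

        System.out.println("daughter ref:  " + reference);  // d1da...cd.3926...81
        System.out.println("daughter link: " + hfileLink);  // TestLogRolling-testLogRolling=3926...81-a599...5f
    }
}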
2024-12-08T08:00:30,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741847_1023 (size=71) 2024-12-08T08:00:30,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741847_1023 (size=71) 2024-12-08T08:00:30,654 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:30,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741848_1024 (size=71) 2024-12-08T08:00:30,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741848_1024 (size=71) 2024-12-08T08:00:30,667 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:30,676 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/recovered.edits/102.seqid, newMaxSeqId=102, maxSeqId=-1 2024-12-08T08:00:30,678 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/recovered.edits/102.seqid, newMaxSeqId=102, maxSeqId=-1 2024-12-08T08:00:30,681 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733644830681"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733644830681"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733644830681"}]},"ts":"1733644830681"} 2024-12-08T08:00:30,681 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733644830681"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644830681"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733644830681"}]},"ts":"1733644830681"} 2024-12-08T08:00:30,681 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733644830681"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733644830681"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733644830681"}]},"ts":"1733644830681"} 2024-12-08T08:00:30,701 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3eb3f75a97f486e2692290569af9634, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=91f928a887803a07c6b42721ad9e5c06, ASSIGN}] 2024-12-08T08:00:30,702 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3eb3f75a97f486e2692290569af9634, ASSIGN 2024-12-08T08:00:30,702 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91f928a887803a07c6b42721ad9e5c06, ASSIGN 2024-12-08T08:00:30,703 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3eb3f75a97f486e2692290569af9634, ASSIGN; state=SPLITTING_NEW, location=0106a245d0e8,38961,1733644804464; forceNewPlan=false, retain=false 2024-12-08T08:00:30,703 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91f928a887803a07c6b42721ad9e5c06, ASSIGN; state=SPLITTING_NEW, location=0106a245d0e8,38961,1733644804464; forceNewPlan=false, retain=false 2024-12-08T08:00:30,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:30,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:30,854 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e3eb3f75a97f486e2692290569af9634, regionState=OPENING, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:30,854 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=91f928a887803a07c6b42721ad9e5c06, regionState=OPENING, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:30,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91f928a887803a07c6b42721ad9e5c06, ASSIGN because future has completed 2024-12-08T08:00:30,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464}] 2024-12-08T08:00:30,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3eb3f75a97f486e2692290569af9634, ASSIGN because future has completed 2024-12-08T08:00:30,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e3eb3f75a97f486e2692290569af9634, server=0106a245d0e8,38961,1733644804464}] 2024-12-08T08:00:31,022 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 
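The two Close-WAL-Writer-0 warnings above share one pattern: RecoverLeaseFSUtils invokes the DFS client's isFileClosed via reflection, the client has already been closed ("Filesystem closed"), and the IOException reaches the caller wrapped in an InvocationTargetException, which is why the first line reads "InvocationTargetException: null" and the real error only shows under "Caused by". A generic, self-contained illustration of that wrapping; the fake class below is not the HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCallSketch {
    static class FakeClient {
        public boolean isFileClosed(String path) throws IOException {
            // Stands in for the closed DFS client rejecting further calls.
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = FakeClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new FakeClient(), "/some/wal/file");
        } catch (InvocationTargetException e) {
            System.out.println("wrapper: " + e);            // InvocationTargetException with no message
            System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}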
2024-12-08T08:00:31,022 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 91f928a887803a07c6b42721ad9e5c06, NAME => 'TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-08T08:00:31,022 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,022 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:31,023 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,023 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,025 INFO [StoreOpener-91f928a887803a07c6b42721ad9e5c06-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,026 INFO [StoreOpener-91f928a887803a07c6b42721ad9e5c06-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91f928a887803a07c6b42721ad9e5c06 columnFamilyName info 2024-12-08T08:00:31,026 DEBUG [StoreOpener-91f928a887803a07c6b42721ad9e5c06-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:31,034 DEBUG [StoreOpener-91f928a887803a07c6b42721ad9e5c06-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f 2024-12-08T08:00:31,039 DEBUG [StoreOpener-91f928a887803a07c6b42721ad9e5c06-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81->hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd-top 2024-12-08T08:00:31,040 INFO [StoreOpener-91f928a887803a07c6b42721ad9e5c06-1 {}] regionserver.HStore(327): Store=91f928a887803a07c6b42721ad9e5c06/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:00:31,040 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,041 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,042 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,043 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,043 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,045 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,046 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 91f928a887803a07c6b42721ad9e5c06; next sequenceid=103; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852123, jitterRate=0.08353106677532196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T08:00:31,046 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:00:31,047 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 91f928a887803a07c6b42721ad9e5c06: Running coprocessor pre-open hook at 1733644831023Writing region info on filesystem at 1733644831023Initializing all the Stores at 1733644831024 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 
1733644831024Cleaning up temporary data from old regions at 1733644831043 (+19 ms)Running coprocessor post-open hooks at 1733644831046 (+3 ms)Region opened successfully at 1733644831047 (+1 ms) 2024-12-08T08:00:31,048 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., pid=12, masterSystemTime=1733644831013 2024-12-08T08:00:31,048 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:00:31,048 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T08:00:31,048 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:31,049 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:00:31,049 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files) 2024-12-08T08:00:31,049 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 
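For the daughter region 91f928a887803a07c6b42721ad9e5c06 the selected inputs are the ~73.6 K reference (75378 bytes) and the ~18.5 K link (18987 bytes), which is consistent with the logged totalSize=92.2 K, and the compaction priority jumps from -2147483648 (Integer.MIN_VALUE) to -2147482648, an offset of exactly 1000; that offset is inferred only from the two values printed here, not from the HBase source. A quick arithmetic check:

public class DaughterCompactionNumbers {
    public static void main(String[] args) {
        long refFileBytes = 75_378L;   // d1da...cd top-half reference, ~73.6 K
        long linkFileBytes = 18_987L;  // TestLogRolling-testLogRolling=...-a599...5f link, ~18.5 K
        System.out.printf("totalSize = %.1f K%n", (refFileBytes + linkFileBytes) / 1024.0); // 92.2 K

        int openRegionPriority = Integer.MIN_VALUE;      // -2147483648 from the "Add compact mark" line
        int keptPriority = openRegionPriority + 1000;    // -2147482648, as "kept/overridden" for the daughter
        System.out.println(keptPriority);
    }
}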
2024-12-08T08:00:31,049 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81->hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd-top, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=92.2 K 2024-12-08T08:00:31,050 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733644815637 2024-12-08T08:00:31,050 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:00:31,050 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:00:31,050 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733644829855 2024-12-08T08:00:31,051 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 
2024-12-08T08:00:31,051 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => e3eb3f75a97f486e2692290569af9634, NAME => 'TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-08T08:00:31,051 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,051 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:00:31,051 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,051 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,051 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=91f928a887803a07c6b42721ad9e5c06, regionState=OPEN, openSeqNum=103, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:31,053 INFO [StoreOpener-e3eb3f75a97f486e2692290569af9634-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,053 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-08T08:00:31,053 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
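Both daughters open with next sequenceid=103 and hbase:meta records openSeqNum=103, which lines up with the 102.seqid markers written earlier (newMaxSeqId=102 for the parent and for each daughter directory): the opening sequence id is simply the recorded maximum plus one. A trivial check; the helper name is illustrative.

public class NextSeqIdSketch {
    static long nextSequenceId(long recordedMaxSeqId) {
        // Assumption, consistent with the log: daughters resume at newMaxSeqId + 1.
        return recordedMaxSeqId + 1;
    }

    public static void main(String[] args) {
        System.out.println(nextSequenceId(102L)); // 103, as logged for both daughters and in hbase:meta
    }
}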
2024-12-08T08:00:31,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-08T08:00:31,053 INFO [StoreOpener-e3eb3f75a97f486e2692290569af9634-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e3eb3f75a97f486e2692290569af9634 columnFamilyName info 2024-12-08T08:00:31,053 DEBUG [StoreOpener-e3eb3f75a97f486e2692290569af9634-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:00:31,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464 because future has completed 2024-12-08T08:00:31,068 DEBUG [StoreOpener-e3eb3f75a97f486e2692290569af9634-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81->hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd-bottom 2024-12-08T08:00:31,069 INFO [StoreOpener-e3eb3f75a97f486e2692290569af9634-1 {}] regionserver.HStore(327): Store=e3eb3f75a97f486e2692290569af9634/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:00:31,069 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-08T08:00:31,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464 in 203 msec 2024-12-08T08:00:31,070 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,071 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,071 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,071 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91f928a887803a07c6b42721ad9e5c06, ASSIGN in 369 msec 2024-12-08T08:00:31,073 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,074 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened e3eb3f75a97f486e2692290569af9634; next sequenceid=103; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724050, jitterRate=-0.07932287454605103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T08:00:31,074 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:00:31,074 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for e3eb3f75a97f486e2692290569af9634: Running coprocessor pre-open hook at 1733644831051Writing region info on filesystem at 1733644831051Initializing all the Stores at 1733644831052 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644831052Cleaning up temporary data from old regions at 1733644831071 (+19 ms)Running coprocessor post-open hooks at 1733644831074 (+3 ms)Region opened successfully at 1733644831074 2024-12-08T08:00:31,075 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634., pid=13, masterSystemTime=1733644831013 2024-12-08T08:00:31,075 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store e3eb3f75a97f486e2692290569af9634:info, priority=-2147483648, current under compaction store size is 2 2024-12-08T08:00:31,075 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:31,075 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 
store files, 0 compacting, 1 eligible, 16 blocking 2024-12-08T08:00:31,076 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:00:31,076 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1541): e3eb3f75a97f486e2692290569af9634/info is initiating minor compaction (all files) 2024-12-08T08:00:31,076 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3eb3f75a97f486e2692290569af9634/info in TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:00:31,076 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81->hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd-bottom] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/.tmp, totalSize=73.6 K 2024-12-08T08:00:31,076 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.Compactor(225): Compacting d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733644815637 2024-12-08T08:00:31,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/info/9db6d0d51e664c53a2baef0bd93f7a2a is 193, key is TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06./info:regioninfo/1733644831051/Put/seqid=0 2024-12-08T08:00:31,077 DEBUG [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:00:31,077 INFO [RS_OPEN_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:00:31,078 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e3eb3f75a97f486e2692290569af9634, regionState=OPEN, openSeqNum=103, regionLocation=0106a245d0e8,38961,1733644804464 2024-12-08T08:00:31,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e3eb3f75a97f486e2692290569af9634, server=0106a245d0e8,38961,1733644804464 because future has completed 2024-12-08T08:00:31,082 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#64 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:00:31,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741849_1025 (size=9882) 2024-12-08T08:00:31,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741849_1025 (size=9882) 2024-12-08T08:00:31,083 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/df668daf528443f7b27be216c0a1d63c is 1080, key is row0062/info:/1733644827842/Put/seqid=0 2024-12-08T08:00:31,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/info/9db6d0d51e664c53a2baef0bd93f7a2a 2024-12-08T08:00:31,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-08T08:00:31,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure e3eb3f75a97f486e2692290569af9634, server=0106a245d0e8,38961,1733644804464 in 220 msec 2024-12-08T08:00:31,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-08T08:00:31,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3eb3f75a97f486e2692290569af9634, ASSIGN in 383 msec 2024-12-08T08:00:31,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741850_1026 (size=23465) 2024-12-08T08:00:31,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741850_1026 (size=23465) 2024-12-08T08:00:31,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=3926904d5898a944bd497c60411add81, daughterA=e3eb3f75a97f486e2692290569af9634, daughterB=91f928a887803a07c6b42721ad9e5c06 in 710 msec 2024-12-08T08:00:31,096 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/df668daf528443f7b27be216c0a1d63c as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df668daf528443f7b27be216c0a1d63c 2024-12-08T08:00:31,101 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3eb3f75a97f486e2692290569af9634#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:00:31,102 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/.tmp/info/7078364aecc94e8b84757ff4e0785ec4 is 1080, key is row0001/info:/1733644815637/Put/seqid=0 2024-12-08T08:00:31,103 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into df668daf528443f7b27be216c0a1d63c(size=22.9 K), total size for store is 22.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T08:00:31,103 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:00:31,103 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=14, startTime=1733644831048; duration=0sec 2024-12-08T08:00:31,103 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:31,103 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info 2024-12-08T08:00:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741851_1027 (size=70862) 2024-12-08T08:00:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741851_1027 (size=70862) 2024-12-08T08:00:31,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/ns/3ecf49d0d1b24f74b1a5e429c8cbdd08 is 43, key is default/ns:d/1733644805544/Put/seqid=0 2024-12-08T08:00:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741852_1028 (size=5153) 2024-12-08T08:00:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741852_1028 (size=5153) 2024-12-08T08:00:31,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/ns/3ecf49d0d1b24f74b1a5e429c8cbdd08 2024-12-08T08:00:31,111 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/.tmp/info/7078364aecc94e8b84757ff4e0785ec4 as 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/7078364aecc94e8b84757ff4e0785ec4 2024-12-08T08:00:31,117 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in e3eb3f75a97f486e2692290569af9634/info of e3eb3f75a97f486e2692290569af9634 into 7078364aecc94e8b84757ff4e0785ec4(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T08:00:31,117 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3eb3f75a97f486e2692290569af9634: 2024-12-08T08:00:31,117 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634., storeName=e3eb3f75a97f486e2692290569af9634/info, priority=15, startTime=1733644831075; duration=0sec 2024-12-08T08:00:31,117 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:00:31,117 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3eb3f75a97f486e2692290569af9634:info 2024-12-08T08:00:31,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/table/9a05553883044440828f2d656c622b9d is 65, key is TestLogRolling-testLogRolling/table:state/1733644806378/Put/seqid=0 2024-12-08T08:00:31,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741853_1029 (size=5340) 2024-12-08T08:00:31,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741853_1029 (size=5340) 2024-12-08T08:00:31,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/table/9a05553883044440828f2d656c622b9d 2024-12-08T08:00:31,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/info/9db6d0d51e664c53a2baef0bd93f7a2a as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/info/9db6d0d51e664c53a2baef0bd93f7a2a 2024-12-08T08:00:31,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/info/9db6d0d51e664c53a2baef0bd93f7a2a, entries=30, sequenceid=17, filesize=9.7 K 2024-12-08T08:00:31,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/ns/3ecf49d0d1b24f74b1a5e429c8cbdd08 as 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/ns/3ecf49d0d1b24f74b1a5e429c8cbdd08 2024-12-08T08:00:31,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/ns/3ecf49d0d1b24f74b1a5e429c8cbdd08, entries=2, sequenceid=17, filesize=5.0 K 2024-12-08T08:00:31,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/table/9a05553883044440828f2d656c622b9d as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/table/9a05553883044440828f2d656c622b9d 2024-12-08T08:00:31,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/table/9a05553883044440828f2d656c622b9d, entries=2, sequenceid=17, filesize=5.2 K 2024-12-08T08:00:31,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 98ms, sequenceid=17, compaction requested=false 2024-12-08T08:00:31,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T08:00:31,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:31,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:32,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:34,263 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T08:00:34,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:34,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:35,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:35,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:35,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:35,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:36,131 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T08:00:36,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:00:36,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:36,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:39,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44078 deadline: 1733644849922, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. is not online on 0106a245d0e8,38961,1733644804464 2024-12-08T08:00:39,924 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. is not online on 0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T08:00:39,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81. 
is not online on 0106a245d0e8,38961,1733644804464 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T08:00:39,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733644805603.3926904d5898a944bd497c60411add81., hostname=0106a245d0e8,38961,1733644804464, seqNum=2 from cache
2024-12-08T08:00:45,560 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-08T08:00:45,560 INFO [master/0106a245d0e8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-08T08:00:50,474 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-12-08T08:00:51,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:52,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:52,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:53,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:53,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:54,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:54,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:55,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:55,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:56,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:56,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:57,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:57,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:58,726 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=4, created chunk count=9, reused chunk count=56, reuseRatio=86.15% 2024-12-08T08:00:58,726 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-08T08:00:58,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:58,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:00:59,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:00:59,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:00,107 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0079', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., hostname=0106a245d0e8,38961,1733644804464, seqNum=103] 2024-12-08T08:01:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:00,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T08:01:00,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/6474843eaffe482584cb2f2200b72a6b is 1080, key is row0079/info:/1733644860107/Put/seqid=0 2024-12-08T08:01:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741854_1030 (size=12509) 2024-12-08T08:01:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741854_1030 (size=12509) 2024-12-08T08:01:00,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/6474843eaffe482584cb2f2200b72a6b 2024-12-08T08:01:00,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/6474843eaffe482584cb2f2200b72a6b as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/6474843eaffe482584cb2f2200b72a6b 2024-12-08T08:01:00,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/6474843eaffe482584cb2f2200b72a6b, entries=7, sequenceid=113, filesize=12.2 K 2024-12-08T08:01:00,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 91f928a887803a07c6b42721ad9e5c06 in 26ms, sequenceid=113, compaction requested=false 2024-12-08T08:01:00,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:00,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:00,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:01,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:01,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:01:02,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:02,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T08:01:02,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/df2f2c367f374ace98371a56fb939634 is 1080, key is row0086/info:/1733644860120/Put/seqid=0 2024-12-08T08:01:02,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741855_1031 (size=17894) 2024-12-08T08:01:02,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741855_1031 (size=17894) 2024-12-08T08:01:02,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/df2f2c367f374ace98371a56fb939634 2024-12-08T08:01:02,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/df2f2c367f374ace98371a56fb939634 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df2f2c367f374ace98371a56fb939634 2024-12-08T08:01:02,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df2f2c367f374ace98371a56fb939634, entries=12, sequenceid=128, filesize=17.5 K 2024-12-08T08:01:02,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 91f928a887803a07c6b42721ad9e5c06 in 21ms, sequenceid=128, compaction requested=true 2024-12-08T08:01:02,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:02,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:01:02,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:02,163 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T08:01:02,164 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53868 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-08T08:01:02,164 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files) 2024-12-08T08:01:02,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:02,164 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:02,164 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df668daf528443f7b27be216c0a1d63c, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/6474843eaffe482584cb2f2200b72a6b, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df2f2c367f374ace98371a56fb939634] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=52.6 K 2024-12-08T08:01:02,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-08T08:01:02,164 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting df668daf528443f7b27be216c0a1d63c, keycount=17, bloomtype=ROW, size=22.9 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733644827842 2024-12-08T08:01:02,165 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6474843eaffe482584cb2f2200b72a6b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733644860107 2024-12-08T08:01:02,165 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting df2f2c367f374ace98371a56fb939634, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733644860120 2024-12-08T08:01:02,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e9300060b10e467181f71642d484db0f is 1080, key is row0098/info:/1733644862142/Put/seqid=0 2024-12-08T08:01:02,182 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#71 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:01:02,183 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/3c2de12e048044d0a8032f59bb0156a8 is 1080, key is row0062/info:/1733644827842/Put/seqid=0 2024-12-08T08:01:02,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741856_1032 (size=16828) 2024-12-08T08:01:02,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741856_1032 (size=16828) 2024-12-08T08:01:02,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e9300060b10e467181f71642d484db0f 2024-12-08T08:01:02,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741857_1033 (size=44066) 2024-12-08T08:01:02,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741857_1033 (size=44066) 2024-12-08T08:01:02,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e9300060b10e467181f71642d484db0f as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e9300060b10e467181f71642d484db0f 2024-12-08T08:01:02,202 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/3c2de12e048044d0a8032f59bb0156a8 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/3c2de12e048044d0a8032f59bb0156a8 2024-12-08T08:01:02,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e9300060b10e467181f71642d484db0f, entries=11, sequenceid=142, filesize=16.4 K 2024-12-08T08:01:02,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=18.91 KB/19368 for 91f928a887803a07c6b42721ad9e5c06 in 39ms, sequenceid=142, compaction requested=false 2024-12-08T08:01:02,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:02,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:02,205 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-08T08:01:02,210 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into 3c2de12e048044d0a8032f59bb0156a8(size=43.0 K), total size for store is 59.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T08:01:02,210 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:02,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/518bd57d29834d37b01a5278e1384acc is 1080, key is row0109/info:/1733644862165/Put/seqid=0 2024-12-08T08:01:02,210 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644862162; duration=0sec 2024-12-08T08:01:02,210 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:02,210 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info 2024-12-08T08:01:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741858_1034 (size=25472) 2024-12-08T08:01:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741858_1034 (size=25472) 2024-12-08T08:01:02,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/518bd57d29834d37b01a5278e1384acc 2024-12-08T08:01:02,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/518bd57d29834d37b01a5278e1384acc as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/518bd57d29834d37b01a5278e1384acc 2024-12-08T08:01:02,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/518bd57d29834d37b01a5278e1384acc, entries=19, sequenceid=164, filesize=24.9 K 2024-12-08T08:01:02,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=1.05 KB/1076 for 
91f928a887803a07c6b42721ad9e5c06 in 428ms, sequenceid=164, compaction requested=true 2024-12-08T08:01:02,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:02,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:01:02,634 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:02,634 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T08:01:02,635 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86366 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T08:01:02,635 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files) 2024-12-08T08:01:02,635 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:02,635 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/3c2de12e048044d0a8032f59bb0156a8, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e9300060b10e467181f71642d484db0f, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/518bd57d29834d37b01a5278e1384acc] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=84.3 K 2024-12-08T08:01:02,635 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c2de12e048044d0a8032f59bb0156a8, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733644827842 2024-12-08T08:01:02,636 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting e9300060b10e467181f71642d484db0f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733644862142 2024-12-08T08:01:02,636 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 518bd57d29834d37b01a5278e1384acc, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733644862165 2024-12-08T08:01:02,648 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#73 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:01:02,648 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/5f5274abb640478ab22dc04913c11091 is 1080, key is row0062/info:/1733644827842/Put/seqid=0 2024-12-08T08:01:02,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741859_1035 (size=76649) 2024-12-08T08:01:02,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741859_1035 (size=76649) 2024-12-08T08:01:02,657 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/5f5274abb640478ab22dc04913c11091 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/5f5274abb640478ab22dc04913c11091 2024-12-08T08:01:02,664 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into 5f5274abb640478ab22dc04913c11091(size=74.9 K), total size for store is 74.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T08:01:02,664 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:02,664 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644862634; duration=0sec 2024-12-08T08:01:02,664 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:02,664 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info 2024-12-08T08:01:02,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:02,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:03,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:03,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:04,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:04,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T08:01:04,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/b07afe129b2c4853a1f5a7d041bd4c25 is 1080, key is row0128/info:/1733644862206/Put/seqid=0 2024-12-08T08:01:04,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741860_1036 (size=12516) 2024-12-08T08:01:04,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741860_1036 (size=12516) 2024-12-08T08:01:04,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/b07afe129b2c4853a1f5a7d041bd4c25 2024-12-08T08:01:04,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/b07afe129b2c4853a1f5a7d041bd4c25 as 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/b07afe129b2c4853a1f5a7d041bd4c25 2024-12-08T08:01:04,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/b07afe129b2c4853a1f5a7d041bd4c25, entries=7, sequenceid=176, filesize=12.2 K 2024-12-08T08:01:04,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 91f928a887803a07c6b42721ad9e5c06 in 23ms, sequenceid=176, compaction requested=false 2024-12-08T08:01:04,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:04,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T08:01:04,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/be5cd5c0b9b64e7ea7c39aff9e6fb170 is 1080, key is row0135/info:/1733644864223/Put/seqid=0 2024-12-08T08:01:04,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741861_1037 (size=19000) 2024-12-08T08:01:04,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741861_1037 (size=19000) 2024-12-08T08:01:04,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/be5cd5c0b9b64e7ea7c39aff9e6fb170 2024-12-08T08:01:04,263 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
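[Editor's note] The recurring Close-WAL-Writer-0 WARN entries above come from RecoverLeaseFSUtils probing whether the old WAL file is already closed by invoking DistributedFileSystem.isFileClosed through reflection; because the test's DFS client has already been shut down, the probe fails with IOException("Filesystem closed"), which surfaces as the wrapped cause of the InvocationTargetException shown in the traces. The sketch below only illustrates that reflective-probe-and-unwrap pattern; it is not the HBase implementation, and the class name, method name IsFileClosedProbe.isFileClosed, and the logging are assumptions made for illustration.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch (not HBase code): probe FileSystem#isFileClosed via
// reflection, the way the stack traces above suggest, and unwrap the cause
// when the invocation fails because the DFS client is already closed.
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {}

  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) is a DistributedFileSystem method, not part of the
      // generic FileSystem API, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      // Filesystem implementation without a usable isFileClosed: treat as unknown.
      return false;
    } catch (InvocationTargetException e) {
      // This is the case the WARN entries record: the real failure is the
      // wrapped cause, e.g. IOException("Filesystem closed").
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        System.err.println("isFileClosed probe failed: " + cause.getMessage());
      }
      return false;
    }
  }
}

The timestamps in this log (08:01:01,801; 08:01:02,802; 08:01:03,804; ...) show the lease-recovery loop logging the same failed probe roughly once per second for each WAL it is trying to close.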
2024-12-08T08:01:04,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/be5cd5c0b9b64e7ea7c39aff9e6fb170 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/be5cd5c0b9b64e7ea7c39aff9e6fb170 2024-12-08T08:01:04,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/be5cd5c0b9b64e7ea7c39aff9e6fb170, entries=13, sequenceid=192, filesize=18.6 K 2024-12-08T08:01:04,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 91f928a887803a07c6b42721ad9e5c06 in 23ms, sequenceid=192, compaction requested=true 2024-12-08T08:01:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:01:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:04,269 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T08:01:04,270 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 108165 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T08:01:04,270 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files) 2024-12-08T08:01:04,270 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 
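[Editor's note] The ExploringCompactionPolicy(116) entry above reports that a candidate set of 3 store files was accepted as "in ratio". As a rough, simplified illustration of that size-ratio idea (no candidate file should dwarf the combined size of the others), the check can be sketched as below; this is not HBase's actual policy code, and the method name filesInRatio, the ratio value 1.2, and the example sizes are hypothetical.

import java.util.Arrays;
import java.util.List;

// Simplified illustration of a size-ratio screen over compaction candidates.
// Not the HBase ExploringCompactionPolicy implementation; it only sketches the
// "candidate set in ratio" idea the log lines allude to.
public final class CompactionRatioSketch {

  private CompactionRatioSketch() {}

  // True if every file is at most `ratio` times the combined size of the others.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = 0L;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical sizes and ratio, not taken from this log.
    List<Long> candidate = Arrays.asList(20_000L, 15_000L, 18_000L);
    System.out.println("in ratio: " + filesInRatio(candidate, 1.2));
  }
}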
2024-12-08T08:01:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:04,271 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/5f5274abb640478ab22dc04913c11091, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/b07afe129b2c4853a1f5a7d041bd4c25, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/be5cd5c0b9b64e7ea7c39aff9e6fb170] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=105.6 K 2024-12-08T08:01:04,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T08:01:04,271 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f5274abb640478ab22dc04913c11091, keycount=66, bloomtype=ROW, size=74.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733644827842 2024-12-08T08:01:04,271 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting b07afe129b2c4853a1f5a7d041bd4c25, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733644862206 2024-12-08T08:01:04,272 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting be5cd5c0b9b64e7ea7c39aff9e6fb170, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733644864223 2024-12-08T08:01:04,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e773e7e2b7ea44f58279bef7af38d5d1 is 1080, key is row0148/info:/1733644864247/Put/seqid=0 2024-12-08T08:01:04,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741862_1038 (size=19000) 2024-12-08T08:01:04,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741862_1038 (size=19000) 2024-12-08T08:01:04,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e773e7e2b7ea44f58279bef7af38d5d1 2024-12-08T08:01:04,282 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#77 average throughput is 88.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:01:04,283 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/fb06259478764a62b7dfba2fa4cb22f5 is 1080, key is row0062/info:/1733644827842/Put/seqid=0 2024-12-08T08:01:04,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e773e7e2b7ea44f58279bef7af38d5d1 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e773e7e2b7ea44f58279bef7af38d5d1 2024-12-08T08:01:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741863_1039 (size=98315) 2024-12-08T08:01:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741863_1039 (size=98315) 2024-12-08T08:01:04,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e773e7e2b7ea44f58279bef7af38d5d1, entries=13, sequenceid=208, filesize=18.6 K 2024-12-08T08:01:04,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 91f928a887803a07c6b42721ad9e5c06 in 19ms, sequenceid=208, compaction requested=false 2024-12-08T08:01:04,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:04,292 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/fb06259478764a62b7dfba2fa4cb22f5 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fb06259478764a62b7dfba2fa4cb22f5 2024-12-08T08:01:04,297 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into fb06259478764a62b7dfba2fa4cb22f5(size=96.0 K), total size for store is 114.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
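[Editor's note] Both the flusher and the compactor in this log first write their output under the region's .tmp directory and only then "commit" it into the info store directory, which the HRegionFileSystem(442) DEBUG lines record as moving the finished file ("Committing .../.tmp/<file> as .../info/<file>"). A minimal sketch of that write-to-temp-then-rename pattern against a Hadoop FileSystem is shown below; the class name, paths, and payload are illustrative assumptions, not the HBase HRegionFileSystem code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the ".tmp then commit" pattern the Committing/Added log lines
// describe: write the new store file somewhere temporary, then move it into
// the live store directory with a single rename.
public final class TmpThenCommitSketch {

  private TmpThenCommitSketch() {}

  static Path writeAndCommit(FileSystem fs, Path tmpDir, Path storeDir,
      String fileName, byte[] payload) throws IOException {
    Path tmpFile = new Path(tmpDir, fileName);
    // 1. Write the complete file under .tmp so readers never see a partial file.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }
    // 2. Commit: move the finished file into the store directory.
    Path committed = new Path(storeDir, fileName);
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + committed);
    }
    return committed;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical local run; the log lines above do this against hdfs://localhost:38901.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path base = new Path("/tmp/tmp-then-commit-demo");
    fs.mkdirs(new Path(base, ".tmp"));
    fs.mkdirs(new Path(base, "info"));
    Path result = writeAndCommit(fs, new Path(base, ".tmp"), new Path(base, "info"),
        "example-storefile", "hello".getBytes("UTF-8"));
    System.out.println("Committed to " + result);
  }
}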
2024-12-08T08:01:04,297 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:04,297 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644864269; duration=0sec 2024-12-08T08:01:04,297 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:04,297 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info 2024-12-08T08:01:04,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:01:04,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:05,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:05,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:05,895 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T08:01:05,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:05,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:06,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T08:01:06,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/64b95176bbf04093a03e96185a995261 is 1080, key is row0161/info:/1733644866274/Put/seqid=0 2024-12-08T08:01:06,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741864_1040 (size=12516) 2024-12-08T08:01:06,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741864_1040 (size=12516) 2024-12-08T08:01:06,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/64b95176bbf04093a03e96185a995261 2024-12-08T08:01:06,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/64b95176bbf04093a03e96185a995261 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/64b95176bbf04093a03e96185a995261 2024-12-08T08:01:06,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/64b95176bbf04093a03e96185a995261, entries=7, sequenceid=219, filesize=12.2 K 2024-12-08T08:01:06,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 91f928a887803a07c6b42721ad9e5c06 in 25ms, sequenceid=219, compaction requested=true 2024-12-08T08:01:06,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
2024-12-08T08:01:06,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1
2024-12-08T08:01:06,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T08:01:06,318 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-08T08:01:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06
2024-12-08T08:01:06,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-08T08:01:06,319 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 129831 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-08T08:01:06,319 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files)
2024-12-08T08:01:06,319 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.
2024-12-08T08:01:06,319 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fb06259478764a62b7dfba2fa4cb22f5, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e773e7e2b7ea44f58279bef7af38d5d1, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/64b95176bbf04093a03e96185a995261] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=126.8 K
2024-12-08T08:01:06,319 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.Compactor(225): Compacting fb06259478764a62b7dfba2fa4cb22f5, keycount=86, bloomtype=ROW, size=96.0 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733644827842
2024-12-08T08:01:06,320 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.Compactor(225): Compacting e773e7e2b7ea44f58279bef7af38d5d1, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733644864247
2024-12-08T08:01:06,320 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] compactions.Compactor(225): Compacting 64b95176bbf04093a03e96185a995261, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733644866274
2024-12-08T08:01:06,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/47d6839cdbac49fb9d961d676330550e is 1080, key is row0168/info:/1733644866294/Put/seqid=0
2024-12-08T08:01:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741865_1041 (size=19000)
2024-12-08T08:01:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741865_1041 (size=19000)
2024-12-08T08:01:06,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/47d6839cdbac49fb9d961d676330550e
2024-12-08T08:01:06,332 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#80 average throughput is 54.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T08:01:06,332 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/c767326d248b4d87aff1aaf9a8860e00 is 1080, key is row0062/info:/1733644827842/Put/seqid=0
2024-12-08T08:01:06,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/47d6839cdbac49fb9d961d676330550e as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/47d6839cdbac49fb9d961d676330550e
2024-12-08T08:01:06,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741866_1042 (size=119981)
2024-12-08T08:01:06,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741866_1042 (size=119981)
2024-12-08T08:01:06,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/47d6839cdbac49fb9d961d676330550e, entries=13, sequenceid=235, filesize=18.6 K
2024-12-08T08:01:06,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 91f928a887803a07c6b42721ad9e5c06 in 27ms, sequenceid=235, compaction requested=false
2024-12-08T08:01:06,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06:
2024-12-08T08:01:06,347 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/c767326d248b4d87aff1aaf9a8860e00 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/c767326d248b4d87aff1aaf9a8860e00
2024-12-08T08:01:06,352 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into c767326d248b4d87aff1aaf9a8860e00(size=117.2 K), total size for store is 135.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T08:01:06,352 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06:
2024-12-08T08:01:06,352 INFO [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644866318; duration=0sec
2024-12-08T08:01:06,352 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T08:01:06,352 DEBUG [RS:0;0106a245d0e8:38961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info
2024-12-08T08:01:06,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-08T08:01:06,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-08T08:01:07,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:07,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06
2024-12-08T08:01:08,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-08T08:01:08,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-12-08T08:01:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:44078 deadline: 1733644878379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464
2024-12-08T08:01:08,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., hostname=0106a245d0e8,38961,1733644804464, seqNum=103 , the old value is region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., hostname=0106a245d0e8,38961,1733644804464, seqNum=103, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-08T08:01:08,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., hostname=0106a245d0e8,38961,1733644804464, seqNum=103 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=91f928a887803a07c6b42721ad9e5c06, server=0106a245d0e8,38961,1733644804464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-08T08:01:08,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., hostname=0106a245d0e8,38961,1733644804464, seqNum=103 because the exception is null or not the one we care about
2024-12-08T08:01:08,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/67a5fe0d14b14ba68cf246ae00ca88ce is 1080, key is row0181/info:/1733644866319/Put/seqid=0
2024-12-08T08:01:08,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741867_1043 (size=19000)
2024-12-08T08:01:08,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741867_1043 (size=19000)
2024-12-08T08:01:08,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/67a5fe0d14b14ba68cf246ae00ca88ce
2024-12-08T08:01:08,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/67a5fe0d14b14ba68cf246ae00ca88ce as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/67a5fe0d14b14ba68cf246ae00ca88ce
2024-12-08T08:01:08,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/67a5fe0d14b14ba68cf246ae00ca88ce, entries=13, sequenceid=252, filesize=18.6 K
2024-12-08T08:01:08,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 91f928a887803a07c6b42721ad9e5c06 in 88ms, sequenceid=252, compaction requested=true
2024-12-08T08:01:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06:
2024-12-08T08:01:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1
2024-12-08T08:01:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T08:01:08,434 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-08T08:01:08,435 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157981 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-08T08:01:08,435 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files)
2024-12-08T08:01:08,435 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.
2024-12-08T08:01:08,435 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/c767326d248b4d87aff1aaf9a8860e00, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/47d6839cdbac49fb9d961d676330550e, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/67a5fe0d14b14ba68cf246ae00ca88ce] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=154.3 K
2024-12-08T08:01:08,435 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting c767326d248b4d87aff1aaf9a8860e00, keycount=106, bloomtype=ROW, size=117.2 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733644827842
2024-12-08T08:01:08,436 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 47d6839cdbac49fb9d961d676330550e, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733644866294
2024-12-08T08:01:08,436 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 67a5fe0d14b14ba68cf246ae00ca88ce, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733644866319
2024-12-08T08:01:08,447 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#82 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T08:01:08,448 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/aaaf7751d6704781927743215bf3a7d8 is 1080, key is row0062/info:/1733644827842/Put/seqid=0
2024-12-08T08:01:08,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741868_1044 (size=148316)
2024-12-08T08:01:08,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741868_1044 (size=148316)
2024-12-08T08:01:08,456 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/aaaf7751d6704781927743215bf3a7d8 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/aaaf7751d6704781927743215bf3a7d8
2024-12-08T08:01:08,461 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into aaaf7751d6704781927743215bf3a7d8(size=144.8 K), total size for store is 144.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T08:01:08,461 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06:
2024-12-08T08:01:08,461 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644868434; duration=0sec
2024-12-08T08:01:08,461 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T08:01:08,462 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info
2024-12-08T08:01:08,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:08,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:09,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:09,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:10,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:10,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:11,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:11,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:12,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:12,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:13,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:13,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:14,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
2024-12-08T08:01:14,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:15,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
2024-12-08T08:01:15,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:16,023 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 91f928a887803a07c6b42721ad9e5c06, had cached 0 bytes from a total of 148316 2024-12-08T08:01:16,051 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e3eb3f75a97f486e2692290569af9634, had cached 0 bytes from a total of 70862 2024-12-08T08:01:16,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:16,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:01:17,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:17,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:18,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-08T08:01:18,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/7588362bb9844cc6ac0871f0adc8af36 is 1080, key is row0194/info:/1733644868349/Put/seqid=0 2024-12-08T08:01:18,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741869_1045 (size=23331) 2024-12-08T08:01:18,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741869_1045 (size=23331) 2024-12-08T08:01:18,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/7588362bb9844cc6ac0871f0adc8af36 2024-12-08T08:01:18,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/7588362bb9844cc6ac0871f0adc8af36 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/7588362bb9844cc6ac0871f0adc8af36 2024-12-08T08:01:18,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/7588362bb9844cc6ac0871f0adc8af36, entries=17, sequenceid=273, filesize=22.8 K 2024-12-08T08:01:18,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, 
currentSize=11.56 KB/11836 for 91f928a887803a07c6b42721ad9e5c06 in 29ms, sequenceid=273, compaction requested=false 2024-12-08T08:01:18,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:18,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T08:01:18,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/0c7934dbba0c4f05b03389224df5138f is 1080, key is row0211/info:/1733644878458/Put/seqid=0 2024-12-08T08:01:18,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741870_1046 (size=17918) 2024-12-08T08:01:18,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741870_1046 (size=17918) 2024-12-08T08:01:18,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/0c7934dbba0c4f05b03389224df5138f 2024-12-08T08:01:18,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/0c7934dbba0c4f05b03389224df5138f as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/0c7934dbba0c4f05b03389224df5138f 2024-12-08T08:01:18,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/0c7934dbba0c4f05b03389224df5138f, entries=12, sequenceid=288, filesize=17.5 K 2024-12-08T08:01:18,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for 91f928a887803a07c6b42721ad9e5c06 in 37ms, sequenceid=288, compaction requested=true 2024-12-08T08:01:18,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:18,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:01:18,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:18,521 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 
eligible, 16 blocking 2024-12-08T08:01:18,522 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 189565 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T08:01:18,522 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files) 2024-12-08T08:01:18,522 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:18,523 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/aaaf7751d6704781927743215bf3a7d8, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/7588362bb9844cc6ac0871f0adc8af36, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/0c7934dbba0c4f05b03389224df5138f] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=185.1 K 2024-12-08T08:01:18,523 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting aaaf7751d6704781927743215bf3a7d8, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733644827842 2024-12-08T08:01:18,523 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7588362bb9844cc6ac0871f0adc8af36, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733644868349 2024-12-08T08:01:18,524 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c7934dbba0c4f05b03389224df5138f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733644878458 2024-12-08T08:01:18,534 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#85 average throughput is 55.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:01:18,534 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/46c22e8531f74dbcbaf8f28dd1def250 is 1080, key is row0062/info:/1733644827842/Put/seqid=0 2024-12-08T08:01:18,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741871_1047 (size=179715) 2024-12-08T08:01:18,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741871_1047 (size=179715) 2024-12-08T08:01:18,540 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/46c22e8531f74dbcbaf8f28dd1def250 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/46c22e8531f74dbcbaf8f28dd1def250 2024-12-08T08:01:18,545 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into 46c22e8531f74dbcbaf8f28dd1def250(size=175.5 K), total size for store is 175.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T08:01:18,545 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:18,545 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644878521; duration=0sec 2024-12-08T08:01:18,546 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:18,546 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info 2024-12-08T08:01:18,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:18,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:19,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:19,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:20,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:20,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T08:01:20,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/fe8eb32fa4b84bbab10105bc26fc1867 is 1080, key is row0223/info:/1733644878485/Put/seqid=0 2024-12-08T08:01:20,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741872_1048 (size=12523) 2024-12-08T08:01:20,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741872_1048 (size=12523) 2024-12-08T08:01:20,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/fe8eb32fa4b84bbab10105bc26fc1867 2024-12-08T08:01:20,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/fe8eb32fa4b84bbab10105bc26fc1867 as 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fe8eb32fa4b84bbab10105bc26fc1867 2024-12-08T08:01:20,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fe8eb32fa4b84bbab10105bc26fc1867, entries=7, sequenceid=299, filesize=12.2 K 2024-12-08T08:01:20,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 91f928a887803a07c6b42721ad9e5c06 in 26ms, sequenceid=299, compaction requested=false 2024-12-08T08:01:20,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:20,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:20,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T08:01:20,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/24725b69bb28431aa21531e9c2dbb14f is 1080, key is row0230/info:/1733644880505/Put/seqid=0 2024-12-08T08:01:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741873_1049 (size=19013) 2024-12-08T08:01:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741873_1049 (size=19013) 2024-12-08T08:01:20,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/24725b69bb28431aa21531e9c2dbb14f 2024-12-08T08:01:20,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/24725b69bb28431aa21531e9c2dbb14f as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/24725b69bb28431aa21531e9c2dbb14f 2024-12-08T08:01:20,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/24725b69bb28431aa21531e9c2dbb14f, entries=13, sequenceid=315, filesize=18.6 K 2024-12-08T08:01:20,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 91f928a887803a07c6b42721ad9e5c06 in 20ms, sequenceid=315, compaction requested=true 2024-12-08T08:01:20,551 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:20,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91f928a887803a07c6b42721ad9e5c06:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T08:01:20,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:20,552 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T08:01:20,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38961 {}] regionserver.HRegion(8855): Flush requested on 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:20,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T08:01:20,553 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 211251 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T08:01:20,553 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1541): 91f928a887803a07c6b42721ad9e5c06/info is initiating minor compaction (all files) 2024-12-08T08:01:20,553 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91f928a887803a07c6b42721ad9e5c06/info in TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:20,553 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/46c22e8531f74dbcbaf8f28dd1def250, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fe8eb32fa4b84bbab10105bc26fc1867, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/24725b69bb28431aa21531e9c2dbb14f] into tmpdir=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp, totalSize=206.3 K 2024-12-08T08:01:20,553 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 46c22e8531f74dbcbaf8f28dd1def250, keycount=161, bloomtype=ROW, size=175.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733644827842 2024-12-08T08:01:20,554 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe8eb32fa4b84bbab10105bc26fc1867, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733644878485 2024-12-08T08:01:20,554 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 24725b69bb28431aa21531e9c2dbb14f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733644880505 2024-12-08T08:01:20,556 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e448211d60d74a748cc925f08f8b8855 is 1080, key is row0243/info:/1733644880532/Put/seqid=0 2024-12-08T08:01:20,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741874_1050 (size=17918) 2024-12-08T08:01:20,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741874_1050 (size=17918) 2024-12-08T08:01:20,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e448211d60d74a748cc925f08f8b8855 2024-12-08T08:01:20,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e448211d60d74a748cc925f08f8b8855 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e448211d60d74a748cc925f08f8b8855 2024-12-08T08:01:20,567 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91f928a887803a07c6b42721ad9e5c06#info#compaction#89 average throughput is 61.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T08:01:20,567 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e4bc63680f704f249a1e311298ec8de8 is 1080, key is row0062/info:/1733644827842/Put/seqid=0 2024-12-08T08:01:20,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741875_1051 (size=201401) 2024-12-08T08:01:20,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741875_1051 (size=201401) 2024-12-08T08:01:20,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e448211d60d74a748cc925f08f8b8855, entries=12, sequenceid=330, filesize=17.5 K 2024-12-08T08:01:20,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for 91f928a887803a07c6b42721ad9e5c06 in 19ms, sequenceid=330, compaction requested=false 2024-12-08T08:01:20,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:20,575 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/e4bc63680f704f249a1e311298ec8de8 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e4bc63680f704f249a1e311298ec8de8 2024-12-08T08:01:20,580 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91f928a887803a07c6b42721ad9e5c06/info of 91f928a887803a07c6b42721ad9e5c06 into e4bc63680f704f249a1e311298ec8de8(size=196.7 K), total size for store is 214.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
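The recurring Close-WAL-Writer-0 warnings threaded through this stretch of the log come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed(...) through reflection while a WAL writer is being closed; the probe wraps every failure in an InvocationTargetException, which is why each entry reads "InvocationTargetException: null" with the real "java.io.IOException: Filesystem closed" nested as the cause, and why the retries roughly one second apart keep failing once the DFSClient behind the cached FileSystem handle has been shut down. The following is a minimal sketch of such a reflective probe for illustration only, not the HBase implementation; the NameNode URI and WAL path in main() are hypothetical placeholders.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Minimal sketch of a reflective isFileClosed probe (illustrative only).
 * A failure inside the invoked method surfaces as InvocationTargetException,
 * with the real error (e.g. "Filesystem closed") attached as the cause --
 * matching the shape of the WARN entries above.
 */
public class IsFileClosedProbe {

  /** Returns true if HDFS reports the file closed; false if the probe is unavailable or fails. */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) is HDFS-specific (DistributedFileSystem), hence the reflection.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      // Not an HDFS filesystem: no probe available, caller falls back to plain retries.
      return false;
    } catch (InvocationTargetException e) {
      // e.getCause() is the underlying IOException ("Filesystem closed" in the log):
      // the client backing this FileSystem handle is already gone, so every retry
      // will fail the same way until the caller gives up.
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical NameNode URI and WAL path, for illustration only.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
    Path wal = new Path("/user/jenkins/WALs/example-wal");
    System.out.println("closed=" + probeIsFileClosed(fs, wal));
  }
}
```

Reflection is presumably used here because isFileClosed is not part of the generic FileSystem API, so the probe has to degrade gracefully on filesystems (or Hadoop versions) that do not provide it rather than fail to link.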
2024-12-08T08:01:20,580 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:20,580 INFO [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., storeName=91f928a887803a07c6b42721ad9e5c06/info, priority=13, startTime=1733644880551; duration=0sec 2024-12-08T08:01:20,580 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T08:01:20,580 DEBUG [RS:0;0106a245d0e8:38961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91f928a887803a07c6b42721ad9e5c06:info 2024-12-08T08:01:20,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:01:20,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:21,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:21,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:01:22,557 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-08T08:01:22,558 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C38961%2C1733644804464.1733644882557 2024-12-08T08:01:22,580 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,580 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,580 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,580 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,580 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644804996 with entries=313, filesize=308.61 KB; new WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644882557 2024-12-08T08:01:22,581 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:44551:44551)] 2024-12-08T08:01:22,581 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644804996 is not closed yet, will try archiving it next time 2024-12-08T08:01:22,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741833_1009 (size=316020) 2024-12-08T08:01:22,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741833_1009 (size=316020) 2024-12-08T08:01:22,585 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-08T08:01:22,589 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/info/73c0a01143ed4289a593bca958204d1e is 186, key is TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634./info:regioninfo/1733644831078/Put/seqid=0 2024-12-08T08:01:22,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741877_1053 (size=6153) 2024-12-08T08:01:22,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741877_1053 (size=6153) 2024-12-08T08:01:22,594 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/info/73c0a01143ed4289a593bca958204d1e 2024-12-08T08:01:22,601 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/.tmp/info/73c0a01143ed4289a593bca958204d1e as 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/info/73c0a01143ed4289a593bca958204d1e 2024-12-08T08:01:22,607 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/info/73c0a01143ed4289a593bca958204d1e, entries=5, sequenceid=21, filesize=6.0 K 2024-12-08T08:01:22,608 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-12-08T08:01:22,608 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T08:01:22,608 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 91f928a887803a07c6b42721ad9e5c06 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-12-08T08:01:22,613 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/f15cbb4db02e468897148ff3425c170b is 1080, key is row0255/info:/1733644880553/Put/seqid=0 2024-12-08T08:01:22,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741878_1054 (size=7116) 2024-12-08T08:01:22,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741878_1054 (size=7116) 2024-12-08T08:01:22,618 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/f15cbb4db02e468897148ff3425c170b 2024-12-08T08:01:22,622 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/.tmp/info/f15cbb4db02e468897148ff3425c170b as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/f15cbb4db02e468897148ff3425c170b 2024-12-08T08:01:22,626 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/f15cbb4db02e468897148ff3425c170b, entries=2, sequenceid=336, filesize=6.9 K 2024-12-08T08:01:22,627 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 91f928a887803a07c6b42721ad9e5c06 in 19ms, sequenceid=336, compaction requested=true 2024-12-08T08:01:22,627 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 91f928a887803a07c6b42721ad9e5c06: 2024-12-08T08:01:22,627 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e3eb3f75a97f486e2692290569af9634: 2024-12-08T08:01:22,627 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
0106a245d0e8%2C38961%2C1733644804464.1733644882627 2024-12-08T08:01:22,633 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,633 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,633 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,633 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,633 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,633 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644882557 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644882627 2024-12-08T08:01:22,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741876_1052 (size=731) 2024-12-08T08:01:22,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741876_1052 (size=731) 2024-12-08T08:01:22,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:44551:44551)] 2024-12-08T08:01:22,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644882557 is not closed yet, will try archiving it next time 2024-12-08T08:01:22,635 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644804996 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/oldWALs/0106a245d0e8%2C38961%2C1733644804464.1733644804996 2024-12-08T08:01:22,635 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T08:01:22,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T08:01:22,636 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T08:01:22,636 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T08:01:22,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:22,636 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/WALs/0106a245d0e8,38961,1733644804464/0106a245d0e8%2C38961%2C1733644804464.1733644882557 to 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/oldWALs/0106a245d0e8%2C38961%2C1733644804464.1733644882557 2024-12-08T08:01:22,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:22,636 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T08:01:22,636 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T08:01:22,636 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=424466806, stopped=false 2024-12-08T08:01:22,636 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,40401,1733644804285 2024-12-08T08:01:22,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T08:01:22,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T08:01:22,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:22,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:22,707 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T08:01:22,708 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T08:01:22,708 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T08:01:22,708 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-08T08:01:22,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:22,708 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:01:22,709 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,38961,1733644804464' ***** 2024-12-08T08:01:22,709 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T08:01:22,710 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T08:01:22,710 INFO [RS:0;0106a245d0e8:38961 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T08:01:22,710 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T08:01:22,710 INFO [RS:0;0106a245d0e8:38961 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T08:01:22,710 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(3091): Received CLOSE for 91f928a887803a07c6b42721ad9e5c06 2024-12-08T08:01:22,710 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(3091): Received CLOSE for e3eb3f75a97f486e2692290569af9634 2024-12-08T08:01:22,710 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 91f928a887803a07c6b42721ad9e5c06, disabling compactions & flushes 2024-12-08T08:01:22,710 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,38961,1733644804464 2024-12-08T08:01:22,711 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:22,711 INFO [RS:0;0106a245d0e8:38961 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T08:01:22,711 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:22,711 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. after waiting 0 ms 2024-12-08T08:01:22,711 INFO [RS:0;0106a245d0e8:38961 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:38961. 2024-12-08T08:01:22,711 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 
2024-12-08T08:01:22,711 DEBUG [RS:0;0106a245d0e8:38961 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T08:01:22,711 DEBUG [RS:0;0106a245d0e8:38961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:22,711 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T08:01:22,711 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T08:01:22,711 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T08:01:22,711 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T08:01:22,712 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-08T08:01:22,712 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 91f928a887803a07c6b42721ad9e5c06=TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06., e3eb3f75a97f486e2692290569af9634=TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.} 2024-12-08T08:01:22,712 DEBUG [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 91f928a887803a07c6b42721ad9e5c06, e3eb3f75a97f486e2692290569af9634 2024-12-08T08:01:22,712 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T08:01:22,712 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T08:01:22,712 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T08:01:22,712 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T08:01:22,712 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T08:01:22,711 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81->hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd-top, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df668daf528443f7b27be216c0a1d63c, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/6474843eaffe482584cb2f2200b72a6b, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/3c2de12e048044d0a8032f59bb0156a8, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df2f2c367f374ace98371a56fb939634, 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e9300060b10e467181f71642d484db0f, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/5f5274abb640478ab22dc04913c11091, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/518bd57d29834d37b01a5278e1384acc, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/b07afe129b2c4853a1f5a7d041bd4c25, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fb06259478764a62b7dfba2fa4cb22f5, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/be5cd5c0b9b64e7ea7c39aff9e6fb170, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e773e7e2b7ea44f58279bef7af38d5d1, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/c767326d248b4d87aff1aaf9a8860e00, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/64b95176bbf04093a03e96185a995261, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/47d6839cdbac49fb9d961d676330550e, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/aaaf7751d6704781927743215bf3a7d8, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/67a5fe0d14b14ba68cf246ae00ca88ce, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/7588362bb9844cc6ac0871f0adc8af36, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/46c22e8531f74dbcbaf8f28dd1def250, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/0c7934dbba0c4f05b03389224df5138f, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fe8eb32fa4b84bbab10105bc26fc1867, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/24725b69bb28431aa21531e9c2dbb14f] to archive 2024-12-08T08:01:22,714 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-12-08T08:01:22,716 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81 2024-12-08T08:01:22,718 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df668daf528443f7b27be216c0a1d63c to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df668daf528443f7b27be216c0a1d63c 2024-12-08T08:01:22,718 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-08T08:01:22,719 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T08:01:22,719 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T08:01:22,719 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644882712Running coprocessor pre-close hooks at 1733644882712Disabling compacts and flushes for region at 1733644882712Disabling writes for close at 1733644882712Writing region close event to WAL at 1733644882714 (+2 ms)Running coprocessor post-close hooks at 1733644882719 (+5 ms)Closed at 1733644882719 2024-12-08T08:01:22,719 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T08:01:22,719 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/TestLogRolling-testLogRolling=3926904d5898a944bd497c60411add81-a5994d318c114846a5eb502d37e1645f 2024-12-08T08:01:22,720 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/6474843eaffe482584cb2f2200b72a6b to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/6474843eaffe482584cb2f2200b72a6b 2024-12-08T08:01:22,721 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/3c2de12e048044d0a8032f59bb0156a8 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/3c2de12e048044d0a8032f59bb0156a8 2024-12-08T08:01:22,722 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df2f2c367f374ace98371a56fb939634 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/df2f2c367f374ace98371a56fb939634 2024-12-08T08:01:22,723 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e9300060b10e467181f71642d484db0f to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e9300060b10e467181f71642d484db0f 2024-12-08T08:01:22,724 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/5f5274abb640478ab22dc04913c11091 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/5f5274abb640478ab22dc04913c11091 2024-12-08T08:01:22,725 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/518bd57d29834d37b01a5278e1384acc to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/518bd57d29834d37b01a5278e1384acc 2024-12-08T08:01:22,726 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 
{}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/b07afe129b2c4853a1f5a7d041bd4c25 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/b07afe129b2c4853a1f5a7d041bd4c25 2024-12-08T08:01:22,727 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fb06259478764a62b7dfba2fa4cb22f5 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fb06259478764a62b7dfba2fa4cb22f5 2024-12-08T08:01:22,728 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/be5cd5c0b9b64e7ea7c39aff9e6fb170 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/be5cd5c0b9b64e7ea7c39aff9e6fb170 2024-12-08T08:01:22,729 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e773e7e2b7ea44f58279bef7af38d5d1 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/e773e7e2b7ea44f58279bef7af38d5d1 2024-12-08T08:01:22,730 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/c767326d248b4d87aff1aaf9a8860e00 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/c767326d248b4d87aff1aaf9a8860e00 2024-12-08T08:01:22,731 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/64b95176bbf04093a03e96185a995261 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/64b95176bbf04093a03e96185a995261 2024-12-08T08:01:22,731 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/47d6839cdbac49fb9d961d676330550e to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/47d6839cdbac49fb9d961d676330550e 2024-12-08T08:01:22,732 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/aaaf7751d6704781927743215bf3a7d8 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/aaaf7751d6704781927743215bf3a7d8 2024-12-08T08:01:22,733 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/67a5fe0d14b14ba68cf246ae00ca88ce to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/67a5fe0d14b14ba68cf246ae00ca88ce 2024-12-08T08:01:22,734 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/7588362bb9844cc6ac0871f0adc8af36 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/7588362bb9844cc6ac0871f0adc8af36 2024-12-08T08:01:22,735 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/46c22e8531f74dbcbaf8f28dd1def250 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/46c22e8531f74dbcbaf8f28dd1def250 2024-12-08T08:01:22,735 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/0c7934dbba0c4f05b03389224df5138f to 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/0c7934dbba0c4f05b03389224df5138f 2024-12-08T08:01:22,736 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fe8eb32fa4b84bbab10105bc26fc1867 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/fe8eb32fa4b84bbab10105bc26fc1867 2024-12-08T08:01:22,737 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/24725b69bb28431aa21531e9c2dbb14f to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/info/24725b69bb28431aa21531e9c2dbb14f 2024-12-08T08:01:22,737 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0106a245d0e8:40401 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-08T08:01:22,737 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [df668daf528443f7b27be216c0a1d63c=23465, 6474843eaffe482584cb2f2200b72a6b=12509, 3c2de12e048044d0a8032f59bb0156a8=44066, df2f2c367f374ace98371a56fb939634=17894, e9300060b10e467181f71642d484db0f=16828, 5f5274abb640478ab22dc04913c11091=76649, 518bd57d29834d37b01a5278e1384acc=25472, b07afe129b2c4853a1f5a7d041bd4c25=12516, fb06259478764a62b7dfba2fa4cb22f5=98315, be5cd5c0b9b64e7ea7c39aff9e6fb170=19000, e773e7e2b7ea44f58279bef7af38d5d1=19000, c767326d248b4d87aff1aaf9a8860e00=119981, 64b95176bbf04093a03e96185a995261=12516, 47d6839cdbac49fb9d961d676330550e=19000, aaaf7751d6704781927743215bf3a7d8=148316, 67a5fe0d14b14ba68cf246ae00ca88ce=19000, 7588362bb9844cc6ac0871f0adc8af36=23331, 46c22e8531f74dbcbaf8f28dd1def250=179715, 0c7934dbba0c4f05b03389224df5138f=17918, fe8eb32fa4b84bbab10105bc26fc1867=12523, 24725b69bb28431aa21531e9c2dbb14f=19013] 2024-12-08T08:01:22,740 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/91f928a887803a07c6b42721ad9e5c06/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=102 2024-12-08T08:01:22,741 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:22,741 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 91f928a887803a07c6b42721ad9e5c06: Waiting for close lock at 1733644882710Running coprocessor pre-close hooks at 1733644882710Disabling compacts and flushes for region at 1733644882710Disabling writes for close at 1733644882711 (+1 ms)Writing region close event to WAL at 1733644882738 (+27 ms)Running coprocessor post-close hooks at 1733644882741 (+3 ms)Closed at 1733644882741 2024-12-08T08:01:22,741 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733644830378.91f928a887803a07c6b42721ad9e5c06. 2024-12-08T08:01:22,741 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e3eb3f75a97f486e2692290569af9634, disabling compactions & flushes 2024-12-08T08:01:22,741 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:01:22,741 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:01:22,741 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 
after waiting 0 ms 2024-12-08T08:01:22,741 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:01:22,741 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81->hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/3926904d5898a944bd497c60411add81/info/d1da942a97ca478a8ae863e3ba8ca8cd-bottom] to archive 2024-12-08T08:01:22,742 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T08:01:22,743 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81 to hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/archive/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/info/d1da942a97ca478a8ae863e3ba8ca8cd.3926904d5898a944bd497c60411add81 2024-12-08T08:01:22,743 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-08T08:01:22,747 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/data/default/TestLogRolling-testLogRolling/e3eb3f75a97f486e2692290569af9634/recovered.edits/107.seqid, newMaxSeqId=107, maxSeqId=102 2024-12-08T08:01:22,747 INFO [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 2024-12-08T08:01:22,747 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e3eb3f75a97f486e2692290569af9634: Waiting for close lock at 1733644882741Running coprocessor pre-close hooks at 1733644882741Disabling compacts and flushes for region at 1733644882741Disabling writes for close at 1733644882741Writing region close event to WAL at 1733644882744 (+3 ms)Running coprocessor post-close hooks at 1733644882747 (+3 ms)Closed at 1733644882747 2024-12-08T08:01:22,747 DEBUG [RS_CLOSE_REGION-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733644830378.e3eb3f75a97f486e2692290569af9634. 
2024-12-08T08:01:22,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:22,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:22,870 INFO [regionserver/0106a245d0e8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T08:01:22,912 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,38961,1733644804464; all regions closed. 2024-12-08T08:01:22,913 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,913 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,914 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,914 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741834_1010 (size=8107) 2024-12-08T08:01:22,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741834_1010 (size=8107) 2024-12-08T08:01:22,924 DEBUG [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/oldWALs 2024-12-08T08:01:22,924 INFO [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C38961%2C1733644804464.meta:.meta(num 1733644805464) 2024-12-08T08:01:22,924 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,924 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,925 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,925 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,925 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:22,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741879_1055 (size=778) 2024-12-08T08:01:22,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741879_1055 (size=778) 2024-12-08T08:01:22,929 DEBUG [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/oldWALs 2024-12-08T08:01:22,929 INFO [RS:0;0106a245d0e8:38961 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 
0106a245d0e8%2C38961%2C1733644804464:(num 1733644882627) 2024-12-08T08:01:22,929 DEBUG [RS:0;0106a245d0e8:38961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:22,929 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T08:01:22,929 INFO [RS:0;0106a245d0e8:38961 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T08:01:22,929 INFO [RS:0;0106a245d0e8:38961 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T08:01:22,929 INFO [RS:0;0106a245d0e8:38961 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T08:01:22,929 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T08:01:22,929 INFO [RS:0;0106a245d0e8:38961 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38961 2024-12-08T08:01:22,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T08:01:22,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,38961,1733644804464 2024-12-08T08:01:22,940 INFO [RS:0;0106a245d0e8:38961 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T08:01:22,951 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,38961,1733644804464] 2024-12-08T08:01:22,961 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,38961,1733644804464 already deleted, retry=false 2024-12-08T08:01:22,961 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,38961,1733644804464 expired; onlineServers=0 2024-12-08T08:01:22,961 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,40401,1733644804285' ***** 2024-12-08T08:01:22,961 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T08:01:22,962 INFO [M:0;0106a245d0e8:40401 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T08:01:22,962 INFO [M:0;0106a245d0e8:40401 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T08:01:22,962 DEBUG [M:0;0106a245d0e8:40401 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T08:01:22,962 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
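The RegionServerTracker entries above react to the deletion of the server's ephemeral znode under /hbase/rs. A minimal sketch, assuming the quorum address and znode path taken from the log, of how a watcher can observe such a NodeDeleted event with the plain ZooKeeper client API (HBase's own ZKWatcher wraps this):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import java.util.concurrent.CountDownLatch;

    public class RsZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch deleted = new CountDownLatch(1);
            String znode = "/hbase/rs/0106a245d0e8,38961,1733644804464"; // path copied from the log above

            ZooKeeper zk = new ZooKeeper("127.0.0.1:64925", 30_000, event -> { });
            // Register a one-shot watch; NodeDeleted fires when the regionserver's
            // ephemeral node disappears (session close or explicit delete).
            zk.exists(znode, (WatchedEvent event) -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    deleted.countDown();
                }
            });

            deleted.await();   // a real tracker would now start server-expiration handling
            zk.close();
        }
    }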
2024-12-08T08:01:22,962 DEBUG [M:0;0106a245d0e8:40401 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T08:01:22,962 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644804786 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644804786,5,FailOnTimeoutGroup] 2024-12-08T08:01:22,962 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644804786 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644804786,5,FailOnTimeoutGroup] 2024-12-08T08:01:22,962 INFO [M:0;0106a245d0e8:40401 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T08:01:22,962 INFO [M:0;0106a245d0e8:40401 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T08:01:22,963 DEBUG [M:0;0106a245d0e8:40401 {}] master.HMaster(1795): Stopping service threads 2024-12-08T08:01:22,963 INFO [M:0;0106a245d0e8:40401 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T08:01:22,963 INFO [M:0;0106a245d0e8:40401 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T08:01:22,963 INFO [M:0;0106a245d0e8:40401 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T08:01:22,963 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T08:01:22,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T08:01:22,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:22,972 DEBUG [M:0;0106a245d0e8:40401 {}] zookeeper.ZKUtil(347): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T08:01:22,972 WARN [M:0;0106a245d0e8:40401 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T08:01:22,973 INFO [M:0;0106a245d0e8:40401 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/.lastflushedseqids 2024-12-08T08:01:22,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741880_1056 (size=228) 2024-12-08T08:01:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741880_1056 (size=228) 2024-12-08T08:01:22,979 INFO [M:0;0106a245d0e8:40401 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T08:01:22,979 INFO [M:0;0106a245d0e8:40401 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T08:01:22,979 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T08:01:22,979 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:22,979 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:22,979 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T08:01:22,979 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:22,980 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-12-08T08:01:22,995 DEBUG [M:0;0106a245d0e8:40401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0538fa155ca4df5a5bbc2e9992a83be is 82, key is hbase:meta,,1/info:regioninfo/1733644805489/Put/seqid=0 2024-12-08T08:01:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741881_1057 (size=5672) 2024-12-08T08:01:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741881_1057 (size=5672) 2024-12-08T08:01:22,999 INFO [M:0;0106a245d0e8:40401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0538fa155ca4df5a5bbc2e9992a83be 2024-12-08T08:01:23,018 DEBUG [M:0;0106a245d0e8:40401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9ba147aba0b4fdb8f06d7db30385443 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733644806383/Put/seqid=0 2024-12-08T08:01:23,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741882_1058 (size=7091) 2024-12-08T08:01:23,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741882_1058 (size=7091) 2024-12-08T08:01:23,029 INFO [M:0;0106a245d0e8:40401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9ba147aba0b4fdb8f06d7db30385443 2024-12-08T08:01:23,034 INFO [M:0;0106a245d0e8:40401 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f9ba147aba0b4fdb8f06d7db30385443 2024-12-08T08:01:23,049 DEBUG [M:0;0106a245d0e8:40401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76212fbb93de40078a59c346d2b6c4cf is 69, key is 0106a245d0e8,38961,1733644804464/rs:state/1733644804847/Put/seqid=0 2024-12-08T08:01:23,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:23,051 INFO [RS:0;0106a245d0e8:38961 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T08:01:23,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38961-0x1000470676f0001, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:23,051 INFO [RS:0;0106a245d0e8:38961 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,38961,1733644804464; zookeeper connection closed. 2024-12-08T08:01:23,051 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@49a8a9ce {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@49a8a9ce 2024-12-08T08:01:23,052 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T08:01:23,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741883_1059 (size=5156) 2024-12-08T08:01:23,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741883_1059 (size=5156) 2024-12-08T08:01:23,053 INFO [M:0;0106a245d0e8:40401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76212fbb93de40078a59c346d2b6c4cf 2024-12-08T08:01:23,071 DEBUG [M:0;0106a245d0e8:40401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab20a99fe3f945f1a296c11b4c063c4e is 52, key is load_balancer_on/state:d/1733644805600/Put/seqid=0 2024-12-08T08:01:23,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741884_1060 (size=5056) 2024-12-08T08:01:23,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741884_1060 (size=5056) 2024-12-08T08:01:23,075 INFO [M:0;0106a245d0e8:40401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab20a99fe3f945f1a296c11b4c063c4e 2024-12-08T08:01:23,079 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0538fa155ca4df5a5bbc2e9992a83be as 
hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f0538fa155ca4df5a5bbc2e9992a83be 2024-12-08T08:01:23,083 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f0538fa155ca4df5a5bbc2e9992a83be, entries=8, sequenceid=125, filesize=5.5 K 2024-12-08T08:01:23,084 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9ba147aba0b4fdb8f06d7db30385443 as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9ba147aba0b4fdb8f06d7db30385443 2024-12-08T08:01:23,088 INFO [M:0;0106a245d0e8:40401 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f9ba147aba0b4fdb8f06d7db30385443 2024-12-08T08:01:23,089 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9ba147aba0b4fdb8f06d7db30385443, entries=13, sequenceid=125, filesize=6.9 K 2024-12-08T08:01:23,090 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76212fbb93de40078a59c346d2b6c4cf as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76212fbb93de40078a59c346d2b6c4cf 2024-12-08T08:01:23,094 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76212fbb93de40078a59c346d2b6c4cf, entries=1, sequenceid=125, filesize=5.0 K 2024-12-08T08:01:23,095 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab20a99fe3f945f1a296c11b4c063c4e as hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ab20a99fe3f945f1a296c11b4c063c4e 2024-12-08T08:01:23,100 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38901/user/jenkins/test-data/2cc1d06a-32fc-fd7c-bb2c-f7efe89b6c0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ab20a99fe3f945f1a296c11b4c063c4e, entries=1, sequenceid=125, filesize=4.9 K 2024-12-08T08:01:23,100 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=125, compaction requested=false 2024-12-08T08:01:23,102 INFO [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
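The HRegionFileSystem "Committing .tmp/... as ..." lines above reflect the usual flush pattern: the new file is written into a temporary directory and then moved into the store directory, so readers only ever see complete files. A minimal sketch of that idiom with java.nio.file, assuming local-filesystem semantics (HDFS uses its own rename rather than ATOMIC_MOVE):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class FlushCommitSketch {
        // Write content into <store>/.tmp/<name>, then publish it as <store>/<family>/<name>.
        static Path writeAndCommit(Path storeDir, String family, String name, byte[] content)
                throws IOException {
            Path tmp = storeDir.resolve(".tmp").resolve(name);
            Path dst = storeDir.resolve(family).resolve(name);
            Files.createDirectories(tmp.getParent());
            Files.createDirectories(dst.getParent());
            Files.write(tmp, content);                                      // flushed file lands in .tmp first
            return Files.move(tmp, dst, StandardCopyOption.ATOMIC_MOVE);    // then is committed in one step
        }
    }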
2024-12-08T08:01:23,102 DEBUG [M:0;0106a245d0e8:40401 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644882979Disabling compacts and flushes for region at 1733644882979Disabling writes for close at 1733644882979Obtaining lock to block concurrent updates at 1733644882980 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644882980Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1733644882980Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733644882981 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644882981Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644882994 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644882994Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644883004 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644883018 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644883018Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644883034 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644883049 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644883049Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644883057 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644883070 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644883070Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d709bed: reopening flushed file at 1733644883078 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14489e4f: reopening flushed file at 1733644883083 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fd1a97e: reopening flushed file at 1733644883089 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35da5c8e: reopening flushed file at 1733644883094 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=125, compaction requested=false at 1733644883100 (+6 ms)Writing region close event to WAL at 1733644883102 (+2 ms)Closed at 1733644883102 2024-12-08T08:01:23,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:23,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:23,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:23,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:23,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:23,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741830_1006 (size=61332) 2024-12-08T08:01:23,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40687 is added to blk_1073741830_1006 (size=61332) 2024-12-08T08:01:23,105 INFO [M:0;0106a245d0e8:40401 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
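The "Region close journal" record above is a sequence of named steps with absolute timestamps and "(+N ms)" deltas from the previous step. A small, purely hypothetical journal helper that produces the same shape of output:

    import java.util.ArrayList;
    import java.util.List;

    public class CloseJournalSketch {
        private final List<String> steps = new ArrayList<>();
        private long last = -1;

        // Record a step; append "(+N ms)" only when time advanced, as in the journal above.
        void step(String what) {
            long now = System.currentTimeMillis();
            String delta = (last >= 0 && now > last) ? " (+" + (now - last) + " ms)" : "";
            steps.add(what + " at " + now + delta);
            last = now;
        }

        @Override
        public String toString() {
            return String.join("", steps);
        }

        public static void main(String[] args) throws InterruptedException {
            CloseJournalSketch journal = new CloseJournalSketch();
            journal.step("Waiting for close lock");
            journal.step("Disabling writes for close");
            Thread.sleep(5);
            journal.step("Writing region close event to WAL");
            journal.step("Closed");
            System.out.println(journal);
        }
    }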
2024-12-08T08:01:23,105 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T08:01:23,105 INFO [M:0;0106a245d0e8:40401 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40401 2024-12-08T08:01:23,105 INFO [M:0;0106a245d0e8:40401 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T08:01:23,217 INFO [M:0;0106a245d0e8:40401 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T08:01:23,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:23,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40401-0x1000470676f0000, quorum=127.0.0.1:64925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:23,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@499df229{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:01:23,253 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3323ea67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:01:23,253 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:01:23,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49bf1df8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:01:23,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@268a31fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir/,STOPPED} 2024-12-08T08:01:23,255 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
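The "Command processor encountered interrupt and exit" / "Ending command processor service" warnings that follow come from a worker thread that blocks on a command queue and treats interruption as its shutdown signal. A minimal sketch of that pattern (class and method names here are illustrative, not the DataNode's actual BPServiceActor internals):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class CommandProcessorSketch implements Runnable {
        private final BlockingQueue<Runnable> commands = new LinkedBlockingQueue<>();

        void submit(Runnable command) {
            commands.add(command);
        }

        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    commands.take().run();        // blocks until a command arrives
                }
            } catch (InterruptedException e) {
                // Interrupt means the service is stopping; leave the loop cleanly.
                System.err.println("Command processor encountered interrupt and exit.");
            }
            System.err.println("Ending command processor service for: " + Thread.currentThread());
        }

        public static void main(String[] args) throws InterruptedException {
            Thread worker = new Thread(new CommandProcessorSketch(), "Command processor");
            worker.start();
            Thread.sleep(100);
            worker.interrupt();                   // the shutdown path exercised in the log
            worker.join();
        }
    }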
2024-12-08T08:01:23,255 WARN [BP-840104754-172.17.0.2-1733644801755 heartbeating to localhost/127.0.0.1:38901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T08:01:23,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T08:01:23,255 WARN [BP-840104754-172.17.0.2-1733644801755 heartbeating to localhost/127.0.0.1:38901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-840104754-172.17.0.2-1733644801755 (Datanode Uuid 0dd5dfd3-8847-4a95-8449-e40b31516f4c) service to localhost/127.0.0.1:38901 2024-12-08T08:01:23,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data3/current/BP-840104754-172.17.0.2-1733644801755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:23,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data4/current/BP-840104754-172.17.0.2-1733644801755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:23,256 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T08:01:23,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fc20c75{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:01:23,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b32401d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:01:23,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:01:23,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@345bbf4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:01:23,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a6db152{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir/,STOPPED} 2024-12-08T08:01:23,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
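The refreshUsed warnings above ("Thread Interrupted waiting to refresh disk information: sleep interrupted") come from a background thread that periodically re-measures space used under a datanode volume and sleeps between passes; an interrupt during that sleep is how it learns the datanode is shutting down. A hedged sketch of such a refresher using java.nio.file (the real CachingGetSpaceUsed is considerably more involved):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.stream.Stream;

    public class DiskUsageRefresherSketch extends Thread {
        private final Path dir;
        private final long intervalMillis;
        private final AtomicLong used = new AtomicLong();

        DiskUsageRefresherSketch(Path dir, long intervalMillis) {
            super("refreshUsed-" + dir);
            this.dir = dir;
            this.intervalMillis = intervalMillis;
            setDaemon(true);
        }

        long getUsed() {
            return used.get();
        }

        @Override
        public void run() {
            while (!isInterrupted()) {
                try {
                    try (Stream<Path> files = Files.walk(dir)) {
                        // Sum regular-file sizes under the volume directory.
                        used.set(files.filter(Files::isRegularFile).mapToLong(p -> {
                            try {
                                return Files.size(p);
                            } catch (IOException e) {
                                return 0L;
                            }
                        }).sum());
                    }
                    Thread.sleep(intervalMillis);  // interrupted here during shutdown
                } catch (InterruptedException e) {
                    System.err.println("Thread Interrupted waiting to refresh disk information: "
                        + e.getMessage());
                    return;
                } catch (IOException e) {
                    return;                         // directory disappeared; stop refreshing
                }
            }
        }
    }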
2024-12-08T08:01:23,261 WARN [BP-840104754-172.17.0.2-1733644801755 heartbeating to localhost/127.0.0.1:38901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T08:01:23,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T08:01:23,261 WARN [BP-840104754-172.17.0.2-1733644801755 heartbeating to localhost/127.0.0.1:38901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-840104754-172.17.0.2-1733644801755 (Datanode Uuid 8fda1568-2a57-4476-a731-d62e21810730) service to localhost/127.0.0.1:38901 2024-12-08T08:01:23,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data1/current/BP-840104754-172.17.0.2-1733644801755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:23,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/cluster_aaac9a43-b979-b7bd-4dc0-5c8e8d096da3/data/data2/current/BP-840104754-172.17.0.2-1733644801755 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:23,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T08:01:23,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cfa2328{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T08:01:23,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@250c37c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:01:23,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:01:23,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444d0b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:01:23,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac7d52f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir/,STOPPED} 2024-12-08T08:01:23,275 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T08:01:23,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T08:01:23,310 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=225 (was 205) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38901 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38901 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38901 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38901 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38901 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38901 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=99 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=8754 (was 8098) - AvailableMemoryMB LEAK? 
- 2024-12-08T08:01:23,317 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=225, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=99, ProcessCount=11, AvailableMemoryMB=8754 2024-12-08T08:01:23,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.log.dir so I do NOT create it in target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ef1dab22-9d92-508e-c394-b95355bdc939/hadoop.tmp.dir so I do NOT create it in target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4, deleteOnExit=true 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/test.cache.data in system properties and HBase conf 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir in system properties and HBase conf 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T08:01:23,318 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T08:01:23,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/nfs.dump.dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/java.io.tmpdir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T08:01:23,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T08:01:23,331 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T08:01:23,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:01:23,746 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T08:01:23,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T08:01:23,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T08:01:23,747 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T08:01:23,748 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:01:23,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@377e4a58{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir/,AVAILABLE} 2024-12-08T08:01:23,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b404f99{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T08:01:23,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:23,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:23,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@186f146f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/java.io.tmpdir/jetty-localhost-37709-hadoop-hdfs-3_4_1-tests_jar-_-any-2719045016049982290/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T08:01:23,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10e33487{HTTP/1.1, (http/1.1)}{localhost:37709} 2024-12-08T08:01:23,835 INFO [Time-limited test {}] server.Server(415): Started @331403ms 2024-12-08T08:01:23,846 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T08:01:24,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:01:24,142 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T08:01:24,143 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T08:01:24,143 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T08:01:24,143 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T08:01:24,144 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a1af98b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir/,AVAILABLE} 2024-12-08T08:01:24,144 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb82bb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T08:01:24,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23e03366{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/java.io.tmpdir/jetty-localhost-34389-hadoop-hdfs-3_4_1-tests_jar-_-any-1290207380029053322/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:01:24,244 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fb7753b{HTTP/1.1, (http/1.1)}{localhost:34389} 2024-12-08T08:01:24,244 INFO [Time-limited test {}] server.Server(415): Started @331812ms 2024-12-08T08:01:24,245 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T08:01:24,270 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T08:01:24,273 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T08:01:24,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T08:01:24,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T08:01:24,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T08:01:24,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22ed154c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir/,AVAILABLE} 2024-12-08T08:01:24,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14628127{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T08:01:24,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76be211{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/java.io.tmpdir/jetty-localhost-41189-hadoop-hdfs-3_4_1-tests_jar-_-any-9626542147841251999/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:01:24,364 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c2fc043{HTTP/1.1, (http/1.1)}{localhost:41189} 2024-12-08T08:01:24,364 INFO [Time-limited test {}] server.Server(415): Started @331931ms 2024-12-08T08:01:24,365 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T08:01:24,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:24,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T08:01:25,382 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data1/current/BP-493202706-172.17.0.2-1733644883334/current, will proceed with Du for space computation calculation, 2024-12-08T08:01:25,382 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data2/current/BP-493202706-172.17.0.2-1733644883334/current, will proceed with Du for space computation calculation, 2024-12-08T08:01:25,396 WARN [Thread-2456 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T08:01:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83062c672963074e with lease ID 0x2c368731442c3a81: Processing first storage report for DS-37b186f2-3cd0-47b1-96b4-c6f5d5031512 from datanode DatanodeRegistration(127.0.0.1:33017, datanodeUuid=ff560f21-5653-411f-98ef-99fa0569accc, infoPort=41839, infoSecurePort=0, ipcPort=35247, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334) 2024-12-08T08:01:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83062c672963074e with lease ID 0x2c368731442c3a81: from storage DS-37b186f2-3cd0-47b1-96b4-c6f5d5031512 node DatanodeRegistration(127.0.0.1:33017, datanodeUuid=ff560f21-5653-411f-98ef-99fa0569accc, infoPort=41839, infoSecurePort=0, ipcPort=35247, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:01:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83062c672963074e with lease ID 0x2c368731442c3a81: Processing first storage report for DS-596b020e-8039-4125-9742-e37615bfbab8 from datanode DatanodeRegistration(127.0.0.1:33017, datanodeUuid=ff560f21-5653-411f-98ef-99fa0569accc, infoPort=41839, infoSecurePort=0, ipcPort=35247, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334) 2024-12-08T08:01:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83062c672963074e with lease ID 0x2c368731442c3a81: from storage DS-596b020e-8039-4125-9742-e37615bfbab8 node DatanodeRegistration(127.0.0.1:33017, datanodeUuid=ff560f21-5653-411f-98ef-99fa0569accc, infoPort=41839, infoSecurePort=0, ipcPort=35247, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:01:25,418 WARN [Thread-2503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data3/current/BP-493202706-172.17.0.2-1733644883334/current, will proceed with Du for space computation calculation, 2024-12-08T08:01:25,419 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data4/current/BP-493202706-172.17.0.2-1733644883334/current, will proceed with Du for space computation calculation, 2024-12-08T08:01:25,441 WARN [Thread-2479 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T08:01:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5d1d36ba8bf5fe9 with lease ID 0x2c368731442c3a82: Processing first storage report for DS-b89f2698-297d-4a87-b998-b55aa714d86d from datanode DatanodeRegistration(127.0.0.1:36835, datanodeUuid=287aab61-576e-423f-93ab-9f4b9fd4aa1c, infoPort=40333, infoSecurePort=0, ipcPort=39627, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334) 2024-12-08T08:01:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5d1d36ba8bf5fe9 with lease ID 0x2c368731442c3a82: from storage DS-b89f2698-297d-4a87-b998-b55aa714d86d node DatanodeRegistration(127.0.0.1:36835, datanodeUuid=287aab61-576e-423f-93ab-9f4b9fd4aa1c, infoPort=40333, infoSecurePort=0, ipcPort=39627, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:01:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5d1d36ba8bf5fe9 with lease ID 0x2c368731442c3a82: Processing first storage report for DS-3d93c11b-0507-4c1c-98e1-d46b22112314 from datanode DatanodeRegistration(127.0.0.1:36835, datanodeUuid=287aab61-576e-423f-93ab-9f4b9fd4aa1c, infoPort=40333, infoSecurePort=0, ipcPort=39627, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334) 2024-12-08T08:01:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5d1d36ba8bf5fe9 with lease ID 0x2c368731442c3a82: from storage DS-3d93c11b-0507-4c1c-98e1-d46b22112314 node DatanodeRegistration(127.0.0.1:36835, datanodeUuid=287aab61-576e-423f-93ab-9f4b9fd4aa1c, infoPort=40333, infoSecurePort=0, ipcPort=39627, storageInfo=lv=-57;cid=testClusterID;nsid=245218051;c=1733644883334), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T08:01:25,493 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342 2024-12-08T08:01:25,498 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/zookeeper_0, clientPort=63740, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T08:01:25,499 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63740 2024-12-08T08:01:25,499 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741825_1001 (size=7) 2024-12-08T08:01:25,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741825_1001 (size=7) 2024-12-08T08:01:25,511 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6 with version=8 2024-12-08T08:01:25,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35577/user/jenkins/test-data/b08a36d3-4a5b-be84-db94-ee2bce47a301/hbase-staging 2024-12-08T08:01:25,513 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T08:01:25,513 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T08:01:25,514 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40265 2024-12-08T08:01:25,514 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40265 connecting to ZooKeeper ensemble=127.0.0.1:63740 2024-12-08T08:01:25,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402650x0, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-08T08:01:25,684 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40265-0x1000471a4be0000 connected 2024-12-08T08:01:25,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,789 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:01:25,791 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6, hbase.cluster.distributed=false 2024-12-08T08:01:25,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T08:01:25,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40265 2024-12-08T08:01:25,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40265 2024-12-08T08:01:25,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40265 2024-12-08T08:01:25,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40265 2024-12-08T08:01:25,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40265 2024-12-08T08:01:25,811 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0106a245d0e8:0 server-side Connection retries=45 2024-12-08T08:01:25,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:01:25,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T08:01:25,811 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T08:01:25,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T08:01:25,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T08:01:25,811 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T08:01:25,812 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T08:01:25,812 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42235 2024-12-08T08:01:25,813 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42235 connecting to ZooKeeper ensemble=127.0.0.1:63740 2024-12-08T08:01:25,813 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422350x0, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T08:01:25,824 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42235-0x1000471a4be0001 connected 2024-12-08T08:01:25,824 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T08:01:25,825 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T08:01:25,825 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T08:01:25,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T08:01:25,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T08:01:25,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42235 2024-12-08T08:01:25,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42235 2024-12-08T08:01:25,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:25,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:25,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42235 2024-12-08T08:01:25,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42235 2024-12-08T08:01:25,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42235 2024-12-08T08:01:25,843 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0106a245d0e8:40265 2024-12-08T08:01:25,843 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0106a245d0e8,40265,1733644885513 2024-12-08T08:01:25,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:01:25,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:01:25,856 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0106a245d0e8,40265,1733644885513 2024-12-08T08:01:25,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:25,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T08:01:25,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:25,867 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T08:01:25,867 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0106a245d0e8,40265,1733644885513 from backup master directory 2024-12-08T08:01:25,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/0106a245d0e8,40265,1733644885513 2024-12-08T08:01:25,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:01:25,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T08:01:25,877 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T08:01:25,877 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0106a245d0e8,40265,1733644885513 2024-12-08T08:01:25,880 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/hbase.id] with ID: e0c80495-8271-4db3-b4fa-ef880ac2f7e3 2024-12-08T08:01:25,880 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/.tmp/hbase.id 2024-12-08T08:01:25,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741826_1002 (size=42) 2024-12-08T08:01:25,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741826_1002 (size=42) 2024-12-08T08:01:25,885 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/.tmp/hbase.id]:[hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/hbase.id] 2024-12-08T08:01:25,894 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:25,894 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T08:01:25,895 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
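The FSUtils entries just above (create the cluster ID file, write it to a temporary location, then move it to its target) follow the common write-to-temp-then-rename pattern. The sketch below is only an illustration of that pattern with the stock Hadoop FileSystem API, under stated assumptions: the root directory and the raw-UUID payload are placeholders, not this test run's actual hbase.id content or format.

```java
// Illustrative sketch only: the "write to .tmp, then move into place" idea described by the
// FSUtils log entries above, expressed with the plain Hadoop FileSystem API.
// Paths and the UUID payload are placeholders, not the values from this test run.
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:34345"); // NameNode address as seen in the log; adjust as needed

    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // placeholder root dir
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // 1. Write the cluster ID to a temporary location first.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move the temporary file to its final name, so readers never
    //    observe a half-written hbase.id.
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("rename " + tmpId + " -> " + finalId + " failed");
    }
    System.out.println("cluster id file at " + finalId);
  }
}
```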
2024-12-08T08:01:25,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:25,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:25,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741827_1003 (size=196) 2024-12-08T08:01:25,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741827_1003 (size=196) 2024-12-08T08:01:25,913 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T08:01:25,914 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T08:01:25,914 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T08:01:25,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741828_1004 (size=1189) 2024-12-08T08:01:25,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741828_1004 (size=1189) 2024-12-08T08:01:25,920 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store 2024-12-08T08:01:25,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741829_1005 (size=34) 2024-12-08T08:01:25,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741829_1005 (size=34) 2024-12-08T08:01:25,926 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:01:25,926 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T08:01:25,926 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:25,927 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:25,927 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T08:01:25,927 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:25,927 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
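The descriptor printed above for 'master:store' can be read more easily as builder calls. The sketch below only restates those column-family attributes with the public HBase client builder API (TableDescriptorBuilder / ColumnFamilyDescriptorBuilder); it is not the code path MasterRegion itself uses to create its local store, and the builder method names assume the HBase 2.x+ client API.

```java
// Sketch only: the column-family settings from the log entry above, expressed with the
// public HBase client builders. Illustration, not the MasterRegion implementation.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {

  static ColumnFamilyDescriptor infoFamily() {
    // Mirrors the 'info' family above: 3 versions, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB blocks.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
  }

  static ColumnFamilyDescriptor simpleFamily(String name) {
    // 'proc', 'rs' and 'state' above share the same settings: 1 version, ROW bloom, 64 KB blocks.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
  }

  public static void main(String[] args) {
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(infoFamily())
        .setColumnFamily(simpleFamily("proc"))
        .setColumnFamily(simpleFamily("rs"))
        .setColumnFamily(simpleFamily("state"))
        .build();
    System.out.println(store);
  }
}
```

Only the 'info' family departs from the shared settings in the printout (more versions, ROW_INDEX_V1 encoding, ROWCOL blooms, in-memory, smaller blocks), which is what the two helper methods above separate out.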
2024-12-08T08:01:25,927 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644885926Disabling compacts and flushes for region at 1733644885926Disabling writes for close at 1733644885927 (+1 ms)Writing region close event to WAL at 1733644885927Closed at 1733644885927 2024-12-08T08:01:25,928 WARN [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/.initializing 2024-12-08T08:01:25,928 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/WALs/0106a245d0e8,40265,1733644885513 2024-12-08T08:01:25,930 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C40265%2C1733644885513, suffix=, logDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/WALs/0106a245d0e8,40265,1733644885513, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/oldWALs, maxLogs=10 2024-12-08T08:01:25,930 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C40265%2C1733644885513.1733644885930 2024-12-08T08:01:25,934 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/WALs/0106a245d0e8,40265,1733644885513/0106a245d0e8%2C40265%2C1733644885513.1733644885930 2024-12-08T08:01:25,934 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40333:40333),(127.0.0.1/127.0.0.1:41839:41839)] 2024-12-08T08:01:25,935 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T08:01:25,935 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:01:25,935 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,935 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T08:01:25,937 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:25,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:25,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T08:01:25,939 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:25,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:01:25,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T08:01:25,940 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:25,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:01:25,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T08:01:25,941 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:25,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T08:01:25,941 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,942 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,942 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,943 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,943 DEBUG [master/0106a245d0e8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,944 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T08:01:25,945 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T08:01:25,946 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T08:01:25,947 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837391, jitterRate=0.06479822099208832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T08:01:25,947 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733644885935Initializing all the Stores at 1733644885936 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644885936Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644885936Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644885936Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644885936Cleaning up temporary data from old regions at 1733644885943 (+7 ms)Region opened successfully at 1733644885947 (+4 ms) 2024-12-08T08:01:25,947 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T08:01:25,949 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ecc8e1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T08:01:25,950 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T08:01:25,950 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T08:01:25,950 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T08:01:25,950 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T08:01:25,951 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T08:01:25,951 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T08:01:25,951 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T08:01:25,953 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T08:01:25,954 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T08:01:25,966 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T08:01:25,967 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T08:01:25,967 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T08:01:25,977 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T08:01:25,977 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T08:01:25,979 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T08:01:25,987 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T08:01:25,989 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T08:01:25,998 DEBUG 
[master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T08:01:26,003 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T08:01:26,014 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T08:01:26,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T08:01:26,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T08:01:26,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,026 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0106a245d0e8,40265,1733644885513, sessionid=0x1000471a4be0000, setting cluster-up flag (Was=false) 2024-12-08T08:01:26,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,077 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T08:01:26,078 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,40265,1733644885513 2024-12-08T08:01:26,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,235 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T08:01:26,239 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0106a245d0e8,40265,1733644885513 2024-12-08T08:01:26,242 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T08:01:26,244 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T08:01:26,244 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T08:01:26,244 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T08:01:26,244 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0106a245d0e8,40265,1733644885513 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0106a245d0e8:0, corePoolSize=5, maxPoolSize=5 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0106a245d0e8:0, corePoolSize=10, maxPoolSize=10 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T08:01:26,247 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0106a245d0e8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733644916249 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T08:01:26,249 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T08:01:26,249 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T08:01:26,249 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
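The CompactionConfiguration(183) lines that recur throughout this startup (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 ms with 0.5 jitter) are read from ordinary HBase configuration. A minimal sketch of setting the same values programmatically; the property keys below are taken from standard HBase documentation and are an assumption here, since the log only reports the resulting numbers.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static Configuration tuned() {
            // Starts from hbase-default.xml / hbase-site.xml on the classpath.
            Configuration conf = HBaseConfiguration.create();
            // Values mirror the CompactionConfiguration(183) lines in this log.
            conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days in ms
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
            return conf;
        }
    }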
2024-12-08T08:01:26,250 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T08:01:26,250 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T08:01:26,250 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T08:01:26,250 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T08:01:26,250 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T08:01:26,250 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,250 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644886250,5,FailOnTimeoutGroup] 2024-12-08T08:01:26,250 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T08:01:26,251 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644886250,5,FailOnTimeoutGroup] 2024-12-08T08:01:26,251 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,251 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T08:01:26,251 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,251 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741831_1007 (size=1321) 2024-12-08T08:01:26,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741831_1007 (size=1321) 2024-12-08T08:01:26,258 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T08:01:26,258 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6 2024-12-08T08:01:26,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741832_1008 (size=32) 2024-12-08T08:01:26,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741832_1008 (size=32) 2024-12-08T08:01:26,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:01:26,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T08:01:26,265 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T08:01:26,265 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T08:01:26,267 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T08:01:26,267 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T08:01:26,268 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T08:01:26,268 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T08:01:26,269 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T08:01:26,269 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T08:01:26,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740 2024-12-08T08:01:26,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740 2024-12-08T08:01:26,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T08:01:26,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T08:01:26,272 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T08:01:26,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T08:01:26,276 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T08:01:26,276 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795579, jitterRate=0.011631011962890625}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T08:01:26,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733644886263Initializing all the Stores at 1733644886264 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644886264Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644886264Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644886264Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644886264Cleaning up temporary data from old regions at 1733644886272 (+8 ms)Region opened successfully at 1733644886277 (+5 ms) 2024-12-08T08:01:26,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T08:01:26,277 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T08:01:26,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T08:01:26,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T08:01:26,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T08:01:26,277 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T08:01:26,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644886277Disabling compacts and flushes for region at 
1733644886277Disabling writes for close at 1733644886277Writing region close event to WAL at 1733644886277Closed at 1733644886277 2024-12-08T08:01:26,278 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T08:01:26,278 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T08:01:26,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T08:01:26,279 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T08:01:26,280 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T08:01:26,332 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(746): ClusterId : e0c80495-8271-4db3-b4fa-ef880ac2f7e3 2024-12-08T08:01:26,332 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T08:01:26,341 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T08:01:26,341 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T08:01:26,352 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T08:01:26,352 DEBUG [RS:0;0106a245d0e8:42235 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3910b78a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0106a245d0e8/172.17.0.2:0 2024-12-08T08:01:26,369 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0106a245d0e8:42235 2024-12-08T08:01:26,369 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T08:01:26,369 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T08:01:26,369 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T08:01:26,370 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(2659): reportForDuty to master=0106a245d0e8,40265,1733644885513 with port=42235, startcode=1733644885811 2024-12-08T08:01:26,370 DEBUG [RS:0;0106a245d0e8:42235 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T08:01:26,372 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55399, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T08:01:26,372 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40265 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,372 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40265 {}] master.ServerManager(517): Registering regionserver=0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,373 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6 2024-12-08T08:01:26,373 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34345 2024-12-08T08:01:26,373 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T08:01:26,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T08:01:26,382 DEBUG [RS:0;0106a245d0e8:42235 {}] zookeeper.ZKUtil(111): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,382 WARN [RS:0;0106a245d0e8:42235 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T08:01:26,382 INFO [RS:0;0106a245d0e8:42235 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T08:01:26,382 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,383 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0106a245d0e8,42235,1733644885811] 2024-12-08T08:01:26,385 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T08:01:26,387 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T08:01:26,388 INFO [RS:0;0106a245d0e8:42235 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T08:01:26,388 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
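The ZKUtil and RecoverableZooKeeper lines above show the master probing optional znodes under the /hbase base node on quorum 127.0.0.1:63740 ("Unable to get data of znode ... because node does not exist (not necessarily an error)") and the region server setting a watch on its own ephemeral /hbase/rs entry. ZKUtil is HBase-internal; a minimal sketch of the same probe-and-watch pattern against the plain ZooKeeper client API, with the connection string taken from the log and the session timeout chosen arbitrarily.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbe {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent event) ->
                System.out.println("Event " + event.getType() + " on " + event.getPath());
            ZooKeeper zk = new ZooKeeper("127.0.0.1:63740", 30_000, watcher);
            try {
                // exists() returns null for a missing node -- the benign
                // "node does not exist (not necessarily an error)" case in the log.
                Stat balancer = zk.exists("/hbase/balancer", false);
                System.out.println("/hbase/balancer " + (balancer == null ? "absent" : "present"));

                // getData() with a watcher both reads the node and registers a watch,
                // roughly what ZKUtil does for existing znodes such as /hbase/rs/<server>.
                byte[] running = zk.getData("/hbase/running", watcher, new Stat());
                System.out.println("/hbase/running carries " + running.length + " bytes");
            } catch (KeeperException.NoNodeException e) {
                // Unlike exists(), getData() throws when the node is missing.
                System.out.println("znode not present yet: " + e.getPath());
            } finally {
                zk.close();
            }
        }
    }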
2024-12-08T08:01:26,388 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T08:01:26,389 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T08:01:26,389 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,389 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,389 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0106a245d0e8:0, corePoolSize=2, maxPoolSize=2 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0106a245d0e8:0, corePoolSize=1, maxPoolSize=1 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T08:01:26,390 DEBUG [RS:0;0106a245d0e8:42235 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0106a245d0e8:0, corePoolSize=3, maxPoolSize=3 2024-12-08T08:01:26,391 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
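The ScheduledChore entries here (CompactionChecker every 1000 ms, CompactedHFilesCleaner every 120000 ms, CompactionThroughputTuner every 60000 ms, and so on) describe fixed-period background tasks. HBase's ChoreService and ScheduledChore classes are internal; a minimal JDK-only sketch of the same fixed-rate scheduling pattern, with the periods copied from the log and placeholder task bodies.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            // A single scheduler thread standing in for the region server's chore pool.
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

            // Periods mirror the ScheduledChore entries in the log; the Runnables are placeholders.
            chores.scheduleAtFixedRate(
                () -> System.out.println("CompactionChecker tick"), 1_000, 1_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(
                () -> System.out.println("CompactionThroughputTuner tick"), 60_000, 60_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(
                () -> System.out.println("CompactedHFilesCleaner tick"), 120_000, 120_000, TimeUnit.MILLISECONDS);
        }
    }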
2024-12-08T08:01:26,391 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,391 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,391 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,391 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,391 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,42235,1733644885811-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T08:01:26,403 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T08:01:26,403 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,42235,1733644885811-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,403 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,403 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.Replication(171): 0106a245d0e8,42235,1733644885811 started 2024-12-08T08:01:26,415 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,415 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1482): Serving as 0106a245d0e8,42235,1733644885811, RpcServer on 0106a245d0e8/172.17.0.2:42235, sessionid=0x1000471a4be0001 2024-12-08T08:01:26,415 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T08:01:26,415 DEBUG [RS:0;0106a245d0e8:42235 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,415 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,42235,1733644885811' 2024-12-08T08:01:26,415 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0106a245d0e8,42235,1733644885811' 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T08:01:26,416 DEBUG 
[RS:0;0106a245d0e8:42235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T08:01:26,416 DEBUG [RS:0;0106a245d0e8:42235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T08:01:26,416 INFO [RS:0;0106a245d0e8:42235 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T08:01:26,416 INFO [RS:0;0106a245d0e8:42235 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T08:01:26,431 WARN [0106a245d0e8:40265 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T08:01:26,520 INFO [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C42235%2C1733644885811, suffix=, logDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/0106a245d0e8,42235,1733644885811, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs, maxLogs=32 2024-12-08T08:01:26,520 INFO [RS:0;0106a245d0e8:42235 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C42235%2C1733644885811.1733644886520 2024-12-08T08:01:26,529 INFO [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/0106a245d0e8,42235,1733644885811/0106a245d0e8%2C42235%2C1733644885811.1733644886520 2024-12-08T08:01:26,530 DEBUG [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40333:40333),(127.0.0.1/127.0.0.1:41839:41839)] 2024-12-08T08:01:26,681 DEBUG [0106a245d0e8:40265 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T08:01:26,681 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,682 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,42235,1733644885811, state=OPENING 2024-12-08T08:01:26,693 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T08:01:26,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:26,704 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T08:01:26,704 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:01:26,704 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:01:26,704 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,42235,1733644885811}] 2024-12-08T08:01:26,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T08:01:26,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-08T08:01:26,859 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T08:01:26,864 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46647, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T08:01:26,869 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-08T08:01:26,869 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-08T08:01:26,872 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0106a245d0e8%2C42235%2C1733644885811.meta, suffix=.meta, logDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/0106a245d0e8,42235,1733644885811, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs, maxLogs=32
2024-12-08T08:01:26,872 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0106a245d0e8%2C42235%2C1733644885811.meta.1733644886872.meta
2024-12-08T08:01:26,877 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/0106a245d0e8,42235,1733644885811/0106a245d0e8%2C42235%2C1733644885811.meta.1733644886872.meta
2024-12-08T08:01:26,881 DEBUG
[RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41839:41839),(127.0.0.1/127.0.0.1:40333:40333)] 2024-12-08T08:01:26,885 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T08:01:26,885 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T08:01:26,885 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T08:01:26,885 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T08:01:26,885 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T08:01:26,886 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T08:01:26,886 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T08:01:26,886 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T08:01:26,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T08:01:26,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T08:01:26,887 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T08:01:26,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T08:01:26,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T08:01:26,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T08:01:26,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T08:01:26,890 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T08:01:26,890 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T08:01:26,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T08:01:26,891 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T08:01:26,891 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740 2024-12-08T08:01:26,892 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740 2024-12-08T08:01:26,893 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T08:01:26,893 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T08:01:26,893 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T08:01:26,894 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T08:01:26,895 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715203, jitterRate=-0.09057316184043884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T08:01:26,895 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T08:01:26,895 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733644886886Writing region info on filesystem at 1733644886886Initializing all the Stores at 1733644886886Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644886886Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644886887 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733644886887Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733644886887Cleaning up temporary data from old regions at 1733644886893 (+6 ms)Running coprocessor post-open hooks at 1733644886895 (+2 ms)Region opened successfully at 1733644886895 2024-12-08T08:01:26,896 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733644886859 2024-12-08T08:01:26,898 DEBUG [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T08:01:26,898 INFO [RS_OPEN_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T08:01:26,898 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,899 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0106a245d0e8,42235,1733644885811, state=OPEN 2024-12-08T08:01:26,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T08:01:26,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T08:01:26,934 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0106a245d0e8,42235,1733644885811 2024-12-08T08:01:26,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:01:26,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T08:01:26,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T08:01:26,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0106a245d0e8,42235,1733644885811 in 230 msec 2024-12-08T08:01:26,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T08:01:26,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 660 msec 2024-12-08T08:01:26,942 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T08:01:26,942 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T08:01:26,945 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T08:01:26,945 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,42235,1733644885811, seqNum=-1] 2024-12-08T08:01:26,945 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T08:01:26,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60037, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T08:01:26,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 709 msec 2024-12-08T08:01:26,954 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733644886954, completionTime=-1 2024-12-08T08:01:26,954 INFO 
[master/0106a245d0e8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T08:01:26,954 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T08:01:26,956 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T08:01:26,956 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733644946956 2024-12-08T08:01:26,956 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733645006956 2024-12-08T08:01:26,956 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T08:01:26,957 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40265,1733644885513-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,957 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40265,1733644885513-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,957 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40265,1733644885513-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,957 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0106a245d0e8:40265, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,957 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,957 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:26,959 DEBUG [master/0106a245d0e8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T08:01:26,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.085sec 2024-12-08T08:01:26,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T08:01:26,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T08:01:26,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T08:01:26,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-08T08:01:26,962 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T08:01:26,963 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40265,1733644885513-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T08:01:26,963 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40265,1733644885513-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T08:01:26,965 DEBUG [master/0106a245d0e8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T08:01:26,965 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T08:01:26,965 INFO [master/0106a245d0e8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0106a245d0e8,40265,1733644885513-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T08:01:27,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3053e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T08:01:27,032 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0106a245d0e8,40265,-1 for getting cluster id 2024-12-08T08:01:27,032 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T08:01:27,033 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e0c80495-8271-4db3-b4fa-ef880ac2f7e3' 2024-12-08T08:01:27,034 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T08:01:27,034 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e0c80495-8271-4db3-b4fa-ef880ac2f7e3" 2024-12-08T08:01:27,034 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cfe972c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T08:01:27,034 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0106a245d0e8,40265,-1] 2024-12-08T08:01:27,035 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T08:01:27,035 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:27,036 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52196, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T08:01:27,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec83466, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T08:01:27,037 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T08:01:27,038 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0106a245d0e8,42235,1733644885811, seqNum=-1] 2024-12-08T08:01:27,039 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T08:01:27,040 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T08:01:27,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0106a245d0e8,40265,1733644885513 2024-12-08T08:01:27,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T08:01:27,044 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T08:01:27,045 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T08:01:27,047 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/test.com,8080,1, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs, maxLogs=32 2024-12-08T08:01:27,048 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733644887047 2024-12-08T08:01:27,053 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733644887047 2024-12-08T08:01:27,054 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40333:40333),(127.0.0.1/127.0.0.1:41839:41839)] 2024-12-08T08:01:27,055 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733644887055 2024-12-08T08:01:27,060 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,060 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,060 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,060 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,060 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,060 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733644887047 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733644887055 2024-12-08T08:01:27,061 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40333:40333),(127.0.0.1/127.0.0.1:41839:41839)] 2024-12-08T08:01:27,062 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733644887047 is not closed yet, will try archiving it next time 2024-12-08T08:01:27,062 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,062 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,062 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741835_1011 (size=93) 2024-12-08T08:01:27,063 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,063 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741835_1011 (size=93) 2024-12-08T08:01:27,064 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733644887047 to hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs/test.com%2C8080%2C1.1733644887047 2024-12-08T08:01:27,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741836_1012 (size=93) 2024-12-08T08:01:27,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741836_1012 (size=93) 2024-12-08T08:01:27,068 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs 2024-12-08T08:01:27,068 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733644887055) 2024-12-08T08:01:27,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T08:01:27,068 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T08:01:27,068 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
  at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
  at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-08T08:01:27,068 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T08:01:27,068 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T08:01:27,068 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-08T08:01:27,069 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-08T08:01:27,069 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1067879265, stopped=false
2024-12-08T08:01:27,069 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0106a245d0e8,40265,1733644885513
2024-12-08T08:01:27,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-08T08:01:27,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-08T08:01:27,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T08:01:27,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T08:01:27,093 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-08T08:01:27,093 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-08T08:01:27,093 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
  at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
  at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
  at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
  at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
  at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
  at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-08T08:01:27,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T08:01:27,093 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T08:01:27,093 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T08:01:27,093 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0106a245d0e8,42235,1733644885811' *****
2024-12-08T08:01:27,093 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T08:01:27,094 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(959): stopping server 0106a245d0e8,42235,1733644885811
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0106a245d0e8:42235.
2024-12-08T08:01:27,094 DEBUG [RS:0;0106a245d0e8:42235 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
  at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
  at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-08T08:01:27,094 DEBUG [RS:0;0106a245d0e8:42235 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T08:01:27,094 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T08:01:27,094 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T08:01:27,094 DEBUG [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T08:01:27,094 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T08:01:27,094 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T08:01:27,094 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T08:01:27,094 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T08:01:27,094 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T08:01:27,095 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T08:01:27,108 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/.tmp/ns/c2cd652aa66a427b8a637b3fe37f1b70 is 43, key is default/ns:d/1733644886947/Put/seqid=0 2024-12-08T08:01:27,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741837_1013 (size=5153) 2024-12-08T08:01:27,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741837_1013 (size=5153) 2024-12-08T08:01:27,112 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/.tmp/ns/c2cd652aa66a427b8a637b3fe37f1b70 2024-12-08T08:01:27,117 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/.tmp/ns/c2cd652aa66a427b8a637b3fe37f1b70 as hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/ns/c2cd652aa66a427b8a637b3fe37f1b70 2024-12-08T08:01:27,121 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/ns/c2cd652aa66a427b8a637b3fe37f1b70, entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T08:01:27,122 INFO 
[RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false 2024-12-08T08:01:27,122 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T08:01:27,126 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T08:01:27,127 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T08:01:27,127 INFO [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T08:01:27,127 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733644887094Running coprocessor pre-close hooks at 1733644887094Disabling compacts and flushes for region at 1733644887094Disabling writes for close at 1733644887094Obtaining lock to block concurrent updates at 1733644887095 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733644887095Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733644887095Flushing stores of hbase:meta,,1.1588230740 at 1733644887095Flushing 1588230740/ns: creating writer at 1733644887095Flushing 1588230740/ns: appending metadata at 1733644887108 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733644887108Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1592cbee: reopening flushed file at 1733644887116 (+8 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false at 1733644887122 (+6 ms)Writing region close event to WAL at 1733644887123 (+1 ms)Running coprocessor post-close hooks at 1733644887127 (+4 ms)Closed at 1733644887127 2024-12-08T08:01:27,127 DEBUG [RS_CLOSE_META-regionserver/0106a245d0e8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T08:01:27,295 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(976): stopping server 0106a245d0e8,42235,1733644885811; all regions closed. 
2024-12-08T08:01:27,296 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,297 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,297 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,297 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741834_1010 (size=1152) 2024-12-08T08:01:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741834_1010 (size=1152) 2024-12-08T08:01:27,302 DEBUG [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs 2024-12-08T08:01:27,302 INFO [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C42235%2C1733644885811.meta:.meta(num 1733644886872) 2024-12-08T08:01:27,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,303 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,303 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741833_1009 (size=93) 2024-12-08T08:01:27,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741833_1009 (size=93) 2024-12-08T08:01:27,468 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T08:01:27,468 INFO [regionserver/0106a245d0e8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T08:01:27,708 DEBUG [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/oldWALs 2024-12-08T08:01:27,708 INFO [RS:0;0106a245d0e8:42235 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0106a245d0e8%2C42235%2C1733644885811:(num 1733644886520) 2024-12-08T08:01:27,708 DEBUG [RS:0;0106a245d0e8:42235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T08:01:27,708 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T08:01:27,708 INFO [RS:0;0106a245d0e8:42235 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T08:01:27,709 INFO [RS:0;0106a245d0e8:42235 {}] hbase.ChoreService(370): Chore service for: regionserver/0106a245d0e8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T08:01:27,709 INFO [RS:0;0106a245d0e8:42235 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T08:01:27,709 INFO [regionserver/0106a245d0e8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T08:01:27,709 INFO [RS:0;0106a245d0e8:42235 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42235 2024-12-08T08:01:27,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T08:01:27,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0106a245d0e8,42235,1733644885811 2024-12-08T08:01:27,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T08:01:27,756 INFO [RS:0;0106a245d0e8:42235 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T08:01:27,766 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0106a245d0e8,42235,1733644885811] 2024-12-08T08:01:27,777 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0106a245d0e8,42235,1733644885811 already deleted, retry=false 2024-12-08T08:01:27,777 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0106a245d0e8,42235,1733644885811 expired; onlineServers=0 2024-12-08T08:01:27,777 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0106a245d0e8,40265,1733644885513' ***** 2024-12-08T08:01:27,777 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T08:01:27,777 INFO [M:0;0106a245d0e8:40265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T08:01:27,777 INFO [M:0;0106a245d0e8:40265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T08:01:27,777 DEBUG [M:0;0106a245d0e8:40265 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T08:01:27,777 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T08:01:27,777 DEBUG [M:0;0106a245d0e8:40265 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T08:01:27,777 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644886250 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.large.0-1733644886250,5,FailOnTimeoutGroup] 2024-12-08T08:01:27,777 DEBUG [master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644886250 {}] cleaner.HFileCleaner(306): Exit Thread[master/0106a245d0e8:0:becomeActiveMaster-HFileCleaner.small.0-1733644886250,5,FailOnTimeoutGroup] 2024-12-08T08:01:27,777 INFO [M:0;0106a245d0e8:40265 {}] hbase.ChoreService(370): Chore service for: master/0106a245d0e8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T08:01:27,777 INFO [M:0;0106a245d0e8:40265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T08:01:27,778 DEBUG [M:0;0106a245d0e8:40265 {}] master.HMaster(1795): Stopping service threads 2024-12-08T08:01:27,778 INFO [M:0;0106a245d0e8:40265 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T08:01:27,778 INFO [M:0;0106a245d0e8:40265 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T08:01:27,778 INFO [M:0;0106a245d0e8:40265 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T08:01:27,778 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T08:01:27,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T08:01:27,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T08:01:27,787 DEBUG [M:0;0106a245d0e8:40265 {}] zookeeper.ZKUtil(347): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T08:01:27,787 WARN [M:0;0106a245d0e8:40265 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T08:01:27,788 INFO [M:0;0106a245d0e8:40265 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/.lastflushedseqids 2024-12-08T08:01:27,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741838_1014 (size=99) 2024-12-08T08:01:27,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741838_1014 (size=99) 2024-12-08T08:01:27,793 INFO [M:0;0106a245d0e8:40265 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T08:01:27,793 INFO [M:0;0106a245d0e8:40265 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T08:01:27,793 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T08:01:27,793 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:27,793 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:27,793 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T08:01:27,793 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:27,793 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-08T08:01:27,808 DEBUG [M:0;0106a245d0e8:40265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eeb2ad0eb03b45319431a11267353343 is 82, key is hbase:meta,,1/info:regioninfo/1733644886898/Put/seqid=0 2024-12-08T08:01:27,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741839_1015 (size=5672) 2024-12-08T08:01:27,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741839_1015 (size=5672) 2024-12-08T08:01:27,812 INFO [M:0;0106a245d0e8:40265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eeb2ad0eb03b45319431a11267353343
2024-12-08T08:01:27,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,42555,1733644663236/0106a245d0e8%2C42555%2C1733644663236.1733644663469
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:01:27,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37725/user/jenkins/test-data/69618439-08a0-af2d-4ca6-f5fc892bd621/WALs/0106a245d0e8,43367,1733644661968/0106a245d0e8%2C43367%2C1733644661968.meta.1733644663048.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T08:01:27,830 DEBUG [M:0;0106a245d0e8:40265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/31c252b221c4409286b7fb9263e331f4 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733644886953/Put/seqid=0 2024-12-08T08:01:27,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741840_1016 (size=5275) 2024-12-08T08:01:27,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741840_1016 (size=5275) 2024-12-08T08:01:27,834 INFO [M:0;0106a245d0e8:40265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/31c252b221c4409286b7fb9263e331f4 2024-12-08T08:01:27,850 DEBUG [M:0;0106a245d0e8:40265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cd18a27410d64b16bb1e7672584c5e97 is 69, key is 0106a245d0e8,42235,1733644885811/rs:state/1733644886372/Put/seqid=0 2024-12-08T08:01:27,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741841_1017 (size=5156) 2024-12-08T08:01:27,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741841_1017 (size=5156) 2024-12-08T08:01:27,854 INFO [M:0;0106a245d0e8:40265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cd18a27410d64b16bb1e7672584c5e97 2024-12-08T08:01:27,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:27,866 INFO [RS:0;0106a245d0e8:42235 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T08:01:27,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42235-0x1000471a4be0001, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:27,866 INFO [RS:0;0106a245d0e8:42235 {}] regionserver.HRegionServer(1031): Exiting; stopping=0106a245d0e8,42235,1733644885811; zookeeper connection closed.
2024-12-08T08:01:27,867 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@65894b6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@65894b6 2024-12-08T08:01:27,867 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T08:01:27,871 DEBUG [M:0;0106a245d0e8:40265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90f2293be44a41de9fb17cfe97f925dd is 52, key is load_balancer_on/state:d/1733644887043/Put/seqid=0 2024-12-08T08:01:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741842_1018 (size=5056) 2024-12-08T08:01:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741842_1018 (size=5056) 2024-12-08T08:01:27,876 INFO [M:0;0106a245d0e8:40265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90f2293be44a41de9fb17cfe97f925dd 2024-12-08T08:01:27,880 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eeb2ad0eb03b45319431a11267353343 as hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eeb2ad0eb03b45319431a11267353343 2024-12-08T08:01:27,884 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eeb2ad0eb03b45319431a11267353343, entries=8, sequenceid=29, filesize=5.5 K 2024-12-08T08:01:27,885 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/31c252b221c4409286b7fb9263e331f4 as hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/31c252b221c4409286b7fb9263e331f4 2024-12-08T08:01:27,889 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/31c252b221c4409286b7fb9263e331f4, entries=3, sequenceid=29, filesize=5.2 K 2024-12-08T08:01:27,889 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cd18a27410d64b16bb1e7672584c5e97 as hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cd18a27410d64b16bb1e7672584c5e97 
2024-12-08T08:01:27,893 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cd18a27410d64b16bb1e7672584c5e97, entries=1, sequenceid=29, filesize=5.0 K 2024-12-08T08:01:27,894 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90f2293be44a41de9fb17cfe97f925dd as hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90f2293be44a41de9fb17cfe97f925dd 2024-12-08T08:01:27,898 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/f9b91578-0f68-82e3-7a98-9a705eec28f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90f2293be44a41de9fb17cfe97f925dd, entries=1, sequenceid=29, filesize=4.9 K 2024-12-08T08:01:27,899 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false 2024-12-08T08:01:27,901 INFO [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T08:01:27,901 DEBUG [M:0;0106a245d0e8:40265 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733644887793Disabling compacts and flushes for region at 1733644887793Disabling writes for close at 1733644887793Obtaining lock to block concurrent updates at 1733644887793Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733644887793Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733644887794 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733644887794Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733644887794Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733644887808 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733644887808Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733644887816 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733644887829 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733644887829Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733644887838 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733644887849 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733644887849Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733644887858 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733644887871 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733644887871Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@330f876b: reopening flushed file at 1733644887880 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@feb7cab: reopening flushed file at 1733644887884 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@241a7cf0: reopening flushed file at 1733644887889 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34644b01: reopening flushed file at 1733644887893 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false at 1733644887899 (+6 ms)Writing region close event to WAL at 1733644887901 (+2 ms)Closed at 1733644887901 2024-12-08T08:01:27,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,901 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,901 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T08:01:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33017 is added to blk_1073741830_1006 (size=10311) 2024-12-08T08:01:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36835 is added to blk_1073741830_1006 (size=10311) 2024-12-08T08:01:27,904 INFO [M:0;0106a245d0e8:40265 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T08:01:27,904 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T08:01:27,904 INFO [M:0;0106a245d0e8:40265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40265 2024-12-08T08:01:27,904 INFO [M:0;0106a245d0e8:40265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T08:01:28,014 INFO [M:0;0106a245d0e8:40265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T08:01:28,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:28,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40265-0x1000471a4be0000, quorum=127.0.0.1:63740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T08:01:28,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76be211{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:01:28,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c2fc043{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:01:28,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:01:28,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14628127{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:01:28,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22ed154c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir/,STOPPED} 2024-12-08T08:01:28,020 WARN [BP-493202706-172.17.0.2-1733644883334 heartbeating to localhost/127.0.0.1:34345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T08:01:28,020 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T08:01:28,020 WARN [BP-493202706-172.17.0.2-1733644883334 heartbeating to localhost/127.0.0.1:34345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-493202706-172.17.0.2-1733644883334 (Datanode Uuid 287aab61-576e-423f-93ab-9f4b9fd4aa1c) service to localhost/127.0.0.1:34345 2024-12-08T08:01:28,020 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T08:01:28,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data3/current/BP-493202706-172.17.0.2-1733644883334 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:28,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data4/current/BP-493202706-172.17.0.2-1733644883334 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:28,022 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T08:01:28,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23e03366{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T08:01:28,026 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fb7753b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:01:28,027 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:01:28,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb82bb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:01:28,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a1af98b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir/,STOPPED} 2024-12-08T08:01:28,028 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T08:01:28,028 WARN [BP-493202706-172.17.0.2-1733644883334 heartbeating to localhost/127.0.0.1:34345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T08:01:28,028 WARN [BP-493202706-172.17.0.2-1733644883334 heartbeating to localhost/127.0.0.1:34345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-493202706-172.17.0.2-1733644883334 (Datanode Uuid ff560f21-5653-411f-98ef-99fa0569accc) service to localhost/127.0.0.1:34345 2024-12-08T08:01:28,028 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T08:01:28,028 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data1/current/BP-493202706-172.17.0.2-1733644883334 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:28,029 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/cluster_ec1afa6a-3d3e-6660-ad50-a096f92fa3a4/data/data2/current/BP-493202706-172.17.0.2-1733644883334 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T08:01:28,029 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T08:01:28,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@186f146f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T08:01:28,034 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10e33487{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T08:01:28,034 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T08:01:28,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b404f99{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T08:01:28,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@377e4a58{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/771f8aa2-01f9-b81e-686e-a3c6d57db342/hadoop.log.dir/,STOPPED} 2024-12-08T08:01:28,040 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T08:01:28,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T08:01:28,061 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=265 (was 225) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:34345
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34345
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: regionserver/0106a245d0e8:0.leaseChecker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34345
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34345
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-6
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:34345
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34345 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34345 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34345 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
- Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=115 (was 99) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8739 (was 8754)