2024-11-16 05:45:59,124 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 05:45:59,137 main DEBUG Took 0.011367 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-16 05:45:59,138 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-16 05:45:59,138 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-16 05:45:59,140 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-16 05:45:59,141 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,151 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-16 05:45:59,167 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,168 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,169 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,170 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,170 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,171 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,172 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,172 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,173 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,173 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,174 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,175 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,176 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,176 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-16 05:45:59,176 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,177 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,178 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,178 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,179 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,179 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,180 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,180 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,181 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,181 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 05:45:59,181 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,182 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-16 05:45:59,183 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 05:45:59,184 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-16 05:45:59,186 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-16 05:45:59,186 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
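The createLoggers() call above assembles the per-package levels the test run uses (for example org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, root at INFO routed to the Console appender), all coming from the test jar's log4j2.properties. As a hedged illustration only, not the project's actual configuration mechanism, the same effective levels could be applied programmatically with Log4j's Configurator; the package names and levels below are copied from the log lines above.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Illustrative sketch: apply the same per-package levels that the
// PropertiesConfiguration above builds from log4j2.properties.
public class TestLogLevels {
    public static void main(String[] args) {
        // Quiet noisy dependencies, as in the logged LoggerConfig builders.
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsConfig", Level.WARN);
        // Verbose logging for the code under test.
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
        // Root logger at INFO, matching "levelAndRefs=INFO,Console" above.
        Configurator.setRootLevel(Level.INFO);
    }
}
```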
2024-11-16 05:45:59,187 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-16 05:45:59,188 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-16 05:45:59,196 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-16 05:45:59,200 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-16 05:45:59,202 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-16 05:45:59,202 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-16 05:45:59,202 main DEBUG createAppenders(={Console}) 2024-11-16 05:45:59,203 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-16 05:45:59,203 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 05:45:59,204 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-16 05:45:59,204 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-16 05:45:59,204 main DEBUG OutputStream closed 2024-11-16 05:45:59,205 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-16 05:45:59,205 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-16 05:45:59,205 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-16 05:45:59,294 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-16 05:45:59,297 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-16 05:45:59,298 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-16 05:45:59,298 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-16 05:45:59,299 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-16 05:45:59,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-16 05:45:59,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-16 05:45:59,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-16 05:45:59,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-16 05:45:59,302 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-16 05:45:59,302 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-16 05:45:59,303 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-16 05:45:59,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-16 05:45:59,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-16 05:45:59,304 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-16 05:45:59,304 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-16 05:45:59,305 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-16 05:45:59,306 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-16 05:45:59,308 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-16 05:45:59,309 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-16 05:45:59,309 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-16 05:45:59,310 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-16T05:45:59,619 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0 2024-11-16 05:45:59,623 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-16 05:45:59,623 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
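The block above shows Log4j2 registering one JMX MBean per configured logger and appender, plus the context, StatusLogger and ContextSelector, all under the org.apache.logging.log4j2 domain. Purely as a hedged sketch (not something this test does itself), those MBeans could be enumerated through the standard platform MBean server:

```java
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Illustrative sketch: list the Log4j2 MBeans registered above
// (Loggers, Appenders, StatusLogger, ContextSelector).
public class ListLog4jMBeans {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName pattern = new ObjectName("org.apache.logging.log4j2:*");
        for (ObjectName name : server.queryNames(pattern, null)) {
            System.out.println(name);
        }
    }
}
```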
2024-11-16T05:45:59,633 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-16T05:45:59,672 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=336, ProcessCount=11, AvailableMemoryMB=4327 2024-11-16T05:45:59,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:45:59,697 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec, deleteOnExit=true 2024-11-16T05:45:59,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:45:59,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/test.cache.data in system properties and HBase conf 2024-11-16T05:45:59,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:45:59,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:45:59,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:45:59,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:45:59,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:45:59,799 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-16T05:45:59,901 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T05:45:59,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:45:59,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:45:59,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:45:59,908 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:45:59,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:45:59,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:45:59,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:45:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:45:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:45:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:45:59,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:45:59,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:45:59,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:45:59,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:46:00,472 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:46:00,811 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T05:46:00,936 INFO [Time-limited test {}] log.Log(170): Logging initialized @2632ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-16T05:46:01,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:46:01,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:46:01,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:46:01,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:46:01,194 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:46:01,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:46:01,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:46:01,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:46:01,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/java.io.tmpdir/jetty-localhost-34449-hadoop-hdfs-3_4_1-tests_jar-_-any-16678325314425742544/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:46:01,473 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:34449} 2024-11-16T05:46:01,473 INFO [Time-limited test {}] server.Server(415): Started @3172ms 2024-11-16T05:46:01,507 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:46:01,913 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:46:01,924 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:46:01,925 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:46:01,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:46:01,926 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:46:01,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@522d97dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:46:01,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1edc186c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:46:02,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@705aed2e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/java.io.tmpdir/jetty-localhost-42919-hadoop-hdfs-3_4_1-tests_jar-_-any-8986209766076182027/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:46:02,046 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f582047{HTTP/1.1, (http/1.1)}{localhost:42919} 2024-11-16T05:46:02,046 INFO [Time-limited test {}] server.Server(415): Started @3745ms 2024-11-16T05:46:02,117 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:46:02,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:46:02,283 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:46:02,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:46:02,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:46:02,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:46:02,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@123edf60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:46:02,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49288de2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:46:02,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e73d4de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/java.io.tmpdir/jetty-localhost-33623-hadoop-hdfs-3_4_1-tests_jar-_-any-3634041551116649910/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:46:02,412 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48024e5{HTTP/1.1, (http/1.1)}{localhost:33623} 2024-11-16T05:46:02,412 INFO [Time-limited test {}] server.Server(415): Started @4110ms 2024-11-16T05:46:02,416 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
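The StartMiniClusterOption logged earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) drives the startup that surrounds it: the Jetty-hosted HDFS NameNode and DataNode web UIs above, then ZooKeeper plus the HBase master and region server further down. A hedged sketch of how a test typically requests such a topology follows; the builder and utility method names are assumed from the HBaseTestingUtil and StartMiniClusterOption classes named in the log, not verified against this exact build.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Illustrative sketch: start a mini-cluster matching the logged option
// (1 master, 1 region server, 2 data nodes, 1 ZooKeeper server).
public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // spins up DFS, ZK, master and region server
        try {
            // ... run test logic against util.getConnection() ...
        } finally {
            util.shutdownMiniCluster();  // tears everything down; data dirs are deleteOnExit
        }
    }
}
```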
2024-11-16T05:46:02,616 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data4/current/BP-1268815547-172.17.0.2-1731735960579/current, will proceed with Du for space computation calculation, 2024-11-16T05:46:02,616 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data1/current/BP-1268815547-172.17.0.2-1731735960579/current, will proceed with Du for space computation calculation, 2024-11-16T05:46:02,616 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data2/current/BP-1268815547-172.17.0.2-1731735960579/current, will proceed with Du for space computation calculation, 2024-11-16T05:46:02,616 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data3/current/BP-1268815547-172.17.0.2-1731735960579/current, will proceed with Du for space computation calculation, 2024-11-16T05:46:02,758 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:46:02,758 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:46:02,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48968f0793be7c16 with lease ID 0xf067d5ee374fef97: Processing first storage report for DS-a60706d9-c967-4b0b-807e-ab71279d41dc from datanode DatanodeRegistration(127.0.0.1:37001, datanodeUuid=3b4185ea-1c03-4660-bd43-40bedfae8a20, infoPort=33489, infoSecurePort=0, ipcPort=39263, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579) 2024-11-16T05:46:02,842 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48968f0793be7c16 with lease ID 0xf067d5ee374fef97: from storage DS-a60706d9-c967-4b0b-807e-ab71279d41dc node DatanodeRegistration(127.0.0.1:37001, datanodeUuid=3b4185ea-1c03-4660-bd43-40bedfae8a20, infoPort=33489, infoSecurePort=0, ipcPort=39263, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-16T05:46:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa5db217248b111d7 with lease ID 0xf067d5ee374fef98: Processing first storage report for DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e from datanode DatanodeRegistration(127.0.0.1:39803, datanodeUuid=06f41a7e-0c3b-452e-9516-3d78325f0927, infoPort=43479, infoSecurePort=0, ipcPort=37791, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579) 2024-11-16T05:46:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa5db217248b111d7 with lease ID 0xf067d5ee374fef98: from storage DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e node DatanodeRegistration(127.0.0.1:39803, datanodeUuid=06f41a7e-0c3b-452e-9516-3d78325f0927, infoPort=43479, infoSecurePort=0, ipcPort=37791, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:46:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48968f0793be7c16 with lease ID 0xf067d5ee374fef97: Processing first storage report for DS-d21aa37b-e769-4396-9049-e945befacd84 from datanode DatanodeRegistration(127.0.0.1:37001, datanodeUuid=3b4185ea-1c03-4660-bd43-40bedfae8a20, infoPort=33489, infoSecurePort=0, ipcPort=39263, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579) 2024-11-16T05:46:02,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48968f0793be7c16 with lease ID 0xf067d5ee374fef97: from storage DS-d21aa37b-e769-4396-9049-e945befacd84 node DatanodeRegistration(127.0.0.1:37001, datanodeUuid=3b4185ea-1c03-4660-bd43-40bedfae8a20, infoPort=33489, infoSecurePort=0, ipcPort=39263, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:46:02,844 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa5db217248b111d7 with lease ID 0xf067d5ee374fef98: Processing first storage report for DS-f4a7e7e8-070d-49db-9860-65b6bb33a158 from datanode DatanodeRegistration(127.0.0.1:39803, datanodeUuid=06f41a7e-0c3b-452e-9516-3d78325f0927, infoPort=43479, infoSecurePort=0, ipcPort=37791, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579) 2024-11-16T05:46:02,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa5db217248b111d7 with lease ID 0xf067d5ee374fef98: from storage DS-f4a7e7e8-070d-49db-9860-65b6bb33a158 node DatanodeRegistration(127.0.0.1:39803, datanodeUuid=06f41a7e-0c3b-452e-9516-3d78325f0927, infoPort=43479, infoSecurePort=0, ipcPort=37791, storageInfo=lv=-57;cid=testClusterID;nsid=282842979;c=1731735960579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:46:02,867 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0 2024-11-16T05:46:02,975 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/zookeeper_0, clientPort=55961, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:46:02,986 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55961 2024-11-16T05:46:02,999 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:03,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:03,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:46:03,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:46:03,692 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2 with version=8 2024-11-16T05:46:03,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:46:03,783 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-16T05:46:04,064 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:46:04,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:46:04,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:46:04,086 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:46:04,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:46:04,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:46:04,245 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:46:04,309 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-16T05:46:04,323 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-16T05:46:04,328 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:46:04,357 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 68563 (auto-detected) 2024-11-16T05:46:04,358 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-16T05:46:04,380 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43225 2024-11-16T05:46:04,405 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43225 connecting to ZooKeeper ensemble=127.0.0.1:55961 2024-11-16T05:46:04,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:432250x0, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:46:04,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43225-0x100471121b10000 connected 2024-11-16T05:46:04,508 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:04,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:04,526 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:46:04,532 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2, hbase.cluster.distributed=false 2024-11-16T05:46:04,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:46:04,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43225 
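The RpcExecutor lines above show the call-queue layout used by both master and region server in this run: a FIFO default queue with 3 handlers, a priority queue split into read and write lanes, and separate replication and meta-priority queues. As a hedged sketch only, these are the standard HBase configuration keys that shape that layout; the key names are regular HBase properties, but the values below are chosen to illustrate, not exactly reproduce, the logged queue counts.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: the usual knobs behind the queue/handler layout
// reported by RpcExecutor above.
public class RpcQueueConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);              // handlerCount=3 in the log
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0f);  // a single default call queue
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);    // split into read/write lanes
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0f);      // no dedicated scan queues
        return conf;
    }
}
```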
2024-11-16T05:46:04,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43225 2024-11-16T05:46:04,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43225 2024-11-16T05:46:04,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43225 2024-11-16T05:46:04,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43225 2024-11-16T05:46:04,721 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:46:04,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:46:04,724 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:46:04,725 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:46:04,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:46:04,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:46:04,729 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:46:04,733 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:46:04,741 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36635 2024-11-16T05:46:04,744 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36635 connecting to ZooKeeper ensemble=127.0.0.1:55961 2024-11-16T05:46:04,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:04,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:04,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366350x0, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:46:04,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:366350x0, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:46:04,773 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache 
size=880 MB, blockSize=64 KB 2024-11-16T05:46:04,777 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36635-0x100471121b10001 connected 2024-11-16T05:46:04,787 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:46:04,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:46:04,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:46:04,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36635 2024-11-16T05:46:04,802 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36635 2024-11-16T05:46:04,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36635 2024-11-16T05:46:04,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36635 2024-11-16T05:46:04,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36635 2024-11-16T05:46:04,826 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:43225 2024-11-16T05:46:04,827 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,43225,1731735963837 2024-11-16T05:46:04,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:46:04,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:46:04,837 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,43225,1731735963837 2024-11-16T05:46:04,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:46:04,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:04,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:04,863 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:46:04,864 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,43225,1731735963837 from backup master directory 2024-11-16T05:46:04,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,43225,1731735963837 2024-11-16T05:46:04,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:46:04,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:46:04,869 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T05:46:04,869 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,43225,1731735963837 2024-11-16T05:46:04,871 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T05:46:04,872 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T05:46:04,923 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase.id] with ID: b20453a3-5349-40b5-8c29-f73915424e44 2024-11-16T05:46:04,924 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/.tmp/hbase.id 2024-11-16T05:46:04,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:46:04,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:46:04,947 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/.tmp/hbase.id]:[hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase.id] 2024-11-16T05:46:05,006 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:05,013 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 
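The cluster ID steps above (FSUtils creates hbase.id, writes it to a .tmp location, then moves it to its target) follow the usual HDFS pattern of writing to a temporary path and renaming it into place so readers never observe a half-written file. A hedged, generic sketch of that pattern with the plain FileSystem API follows; the paths and helper are placeholders, not the actual FSUtils implementation.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: write-to-temp-then-rename, the pattern used for hbase.id above.
public class WriteThenRename {
    public static void writeAtomically(Configuration conf, Path target, String content)
            throws IOException {
        FileSystem fs = target.getFileSystem(conf);
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        // Rename is atomic on HDFS, so readers see either no file or the complete one.
        if (!fs.rename(tmp, target)) {
            throw new IOException("Failed to move " + tmp + " to " + target);
        }
    }
}
```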
2024-11-16T05:46:05,033 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-16T05:46:05,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:46:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:46:05,075 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:46:05,077 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:46:05,083 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:46:05,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:46:05,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:46:05,159 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store 2024-11-16T05:46:05,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:46:05,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:46:05,193 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-16T05:46:05,202 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:46:05,204 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:46:05,204 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:46:05,204 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:46:05,206 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:46:05,206 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:46:05,206 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
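The master local region above is created from a table descriptor with four column families (info, proc, rs, state); only 'info' keeps 3 versions, uses ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, an 8 KB block size and is held in memory, while the others use the 64 KB defaults. As a hedged sketch of how such a descriptor is expressed with the public client API (this is not the MasterRegion code itself, just equivalent builder calls for two of the families):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: rebuild part of the 'master:store' descriptor logged above.
public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
    }
}
```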
2024-11-16T05:46:05,208 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731735965203Disabling compacts and flushes for region at 1731735965203Disabling writes for close at 1731735965206 (+3 ms)Writing region close event to WAL at 1731735965206Closed at 1731735965206 2024-11-16T05:46:05,211 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/.initializing 2024-11-16T05:46:05,211 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/WALs/3456ee6a3164,43225,1731735963837 2024-11-16T05:46:05,243 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C43225%2C1731735963837, suffix=, logDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/WALs/3456ee6a3164,43225,1731735963837, archiveDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/oldWALs, maxLogs=10 2024-11-16T05:46:05,259 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C43225%2C1731735963837.1731735965253 2024-11-16T05:46:05,292 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/WALs/3456ee6a3164,43225,1731735963837/3456ee6a3164%2C43225%2C1731735963837.1731735965253 2024-11-16T05:46:05,304 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:46:05,306 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:46:05,307 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:46:05,311 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,312 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:46:05,388 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:05,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:46:05,395 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:46:05,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:46:05,401 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:46:05,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:46:05,406 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,408 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:46:05,408 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,413 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,414 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,419 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,419 DEBUG [master/3456ee6a3164:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,422 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T05:46:05,426 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:46:05,432 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:46:05,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796131, jitterRate=0.01233343780040741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:46:05,441 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731735965325Initializing all the Stores at 1731735965327 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735965328 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731735965328Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731735965329 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731735965329Cleaning up temporary data from old regions at 1731735965419 (+90 ms)Region opened successfully at 1731735965440 (+21 ms) 2024-11-16T05:46:05,442 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:46:05,484 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@626ecc2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:46:05,521 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T05:46:05,537 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:46:05,538 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:46:05,542 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:46:05,544 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T05:46:05,549 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-16T05:46:05,549 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:46:05,575 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:46:05,592 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:46:05,595 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:46:05,600 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:46:05,602 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:46:05,605 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:46:05,607 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:46:05,611 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:46:05,613 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:46:05,615 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:46:05,618 
DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:46:05,642 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:46:05,649 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:46:05,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:46:05,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:46:05,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,655 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,43225,1731735963837, sessionid=0x100471121b10000, setting cluster-up flag (Was=false) 2024-11-16T05:46:05,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,673 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:46:05,675 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,43225,1731735963837 2024-11-16T05:46:05,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:05,686 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T05:46:05,688 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,43225,1731735963837 2024-11-16T05:46:05,694 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:46:05,716 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(746): ClusterId : b20453a3-5349-40b5-8c29-f73915424e44 2024-11-16T05:46:05,718 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:46:05,722 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:46:05,722 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:46:05,725 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T05:46:05,725 DEBUG [RS:0;3456ee6a3164:36635 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1571b8ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:46:05,738 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:36635 2024-11-16T05:46:05,741 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:46:05,741 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:46:05,741 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T05:46:05,744 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,43225,1731735963837 with port=36635, startcode=1731735964677 2024-11-16T05:46:05,754 DEBUG [RS:0;3456ee6a3164:36635 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:46:05,767 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:46:05,776 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:46:05,785 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
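Annotation: the balancer entries above report the values the StochasticLoadBalancer was loaded with (slop=0.2, maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000). A short sketch of overriding these programmatically follows; the property names (hbase.regions.slop and the hbase.master.balancer.stochastic.* family) are the commonly documented keys and are an assumption here, since the log prints only the resulting values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values matching what the log reports as loaded.
        conf.setFloat("hbase.regions.slop", 0.2f);
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }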
2024-11-16T05:46:05,791 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,43225,1731735963837 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:46:05,800 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:46:05,800 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:46:05,800 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:46:05,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:46:05,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:46:05,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:05,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:46:05,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:05,803 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731735995803 2024-11-16T05:46:05,805 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:46:05,807 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:46:05,807 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:46:05,807 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:46:05,811 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:46:05,812 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:46:05,812 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:46:05,813 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T05:46:05,813 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,813 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:46:05,816 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:05,816 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:46:05,822 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:46:05,823 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:46:05,823 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:46:05,828 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:46:05,825 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43225 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-16T05:46:05,829 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:46:05,832 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731735965830,5,FailOnTimeoutGroup] 2024-11-16T05:46:05,834 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731735965832,5,FailOnTimeoutGroup] 2024-11-16T05:46:05,835 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:05,835 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:46:05,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:46:05,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:46:05,838 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:05,838 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
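Annotation: the stack trace above is the region server's first reportForDuty call being rejected with ServerNotRunningYetException because the master RPC services are not up yet; later entries show the server sleeping 100 ms and retrying, and the second attempt succeeding. Below is a generic sketch of that retry pattern, not the actual HRegionServer code; the register callable is a stand-in for the real RPC.

    import java.util.concurrent.Callable;
    import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;

    public class ReportForDutySketch {
      // Retries a registration call while the master answers "not running yet".
      static <T> T retryUntilMasterUp(Callable<T> register, long sleepMs) throws Exception {
        while (true) {
          try {
            return register.call();
          } catch (ServerNotRunningYetException e) {
            Thread.sleep(sleepMs);   // the log shows a 100 ms pause between attempts
          }
        }
      }

      public static void main(String[] args) throws Exception {
        // Stand-in "RPC" that fails once and then succeeds, mimicking the log.
        final int[] attempts = {0};
        String result = retryUntilMasterUp(() -> {
          if (attempts[0]++ == 0) {
            throw new ServerNotRunningYetException("Server is not running yet");
          }
          return "registered";
        }, 100);
        System.out.println(result);
      }
    }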
2024-11-16T05:46:05,839 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:46:05,839 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2 2024-11-16T05:46:05,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:46:05,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:46:05,854 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:46:05,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:46:05,860 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:46:05,860 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,861 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-16T05:46:05,861 WARN [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-16T05:46:05,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:05,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:46:05,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:46:05,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:05,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:46:05,868 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:46:05,869 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:05,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:46:05,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:46:05,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:05,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:05,874 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:46:05,876 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740 2024-11-16T05:46:05,877 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740 2024-11-16T05:46:05,880 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:46:05,880 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:46:05,881 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
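Annotation: the FlushLargeStoresPolicy entry above explains where the per-family flush lower bound comes from when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set: the region's memstore flush heap size divided by its number of column families. A small worked check against the numbers in this log (both regions have 4 families; the heap sizes are implied by the per-family figures rather than printed directly):

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        int families = 4;                           // info, ns, rep_barrier, table for hbase:meta
        long metaFlushHeap = 64L * 1024 * 1024;     // implied: 16.0 M per family x 4 families
        long storeFlushHeap = 128L * 1024 * 1024;   // implied: 32.0 M per family x 4 families (master:store)

        // Prints 16777216 and 33554432, matching the flushSizeLowerBound values
        // in the "Opened ..." entries for the two regions.
        System.out.println(metaFlushHeap / families);
        System.out.println(storeFlushHeap / families);
      }
    }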
2024-11-16T05:46:05,885 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:46:05,893 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:46:05,895 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800569, jitterRate=0.017976194620132446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:46:05,898 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731735965854Initializing all the Stores at 1731735965856 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735965856Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735965856Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731735965856Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735965856Cleaning up temporary data from old regions at 1731735965880 (+24 ms)Region opened successfully at 1731735965898 (+18 ms) 2024-11-16T05:46:05,899 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:46:05,899 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:46:05,899 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:46:05,899 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:46:05,899 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:46:05,901 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:46:05,901 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731735965899Disabling compacts and flushes for region at 1731735965899Disabling writes for close at 1731735965899Writing region 
close event to WAL at 1731735965900 (+1 ms)Closed at 1731735965900 2024-11-16T05:46:05,904 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:46:05,905 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T05:46:05,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:46:05,919 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:46:05,922 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:46:05,962 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,43225,1731735963837 with port=36635, startcode=1731735964677 2024-11-16T05:46:05,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43225 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,36635,1731735964677 2024-11-16T05:46:05,967 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43225 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,36635,1731735964677 2024-11-16T05:46:05,974 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2 2024-11-16T05:46:05,974 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42921 2024-11-16T05:46:05,975 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:46:05,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:46:05,980 DEBUG [RS:0;3456ee6a3164:36635 {}] zookeeper.ZKUtil(111): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,36635,1731735964677 2024-11-16T05:46:05,980 WARN [RS:0;3456ee6a3164:36635 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
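Annotation: the recurring "Received ZooKeeper Event" entries, and the "Set watcher on existing znode=/hbase/rs/..." entry just above, come from HBase's ZKWatcher/ZKUtil layer. As background, here is a minimal sketch of the same pattern with the raw Apache ZooKeeper client API (register a children watch and react to NodeChildrenChanged); the connect string 127.0.0.1:55961 is taken from the log, everything else is illustrative and not HBase code.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ChildrenWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55961", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Watch the /hbase parent znode; the callback fires once when a child
        // is added or removed (e.g. a region server ephemeral node under /hbase/rs).
        List<String> children = zk.getChildren("/hbase", event ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", path=" + event.getPath()));
        System.out.println("current children: " + children);
        zk.close();
      }
    }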
2024-11-16T05:46:05,981 INFO [RS:0;3456ee6a3164:36635 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:46:05,981 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677 2024-11-16T05:46:05,983 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,36635,1731735964677] 2024-11-16T05:46:06,007 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:46:06,022 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:46:06,027 INFO [RS:0;3456ee6a3164:36635 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:46:06,027 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,028 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:46:06,033 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:46:06,034 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,034 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,034 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,035 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,035 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,035 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,035 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:46:06,035 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,036 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,036 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:46:06,036 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,036 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,036 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:46:06,036 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:46:06,037 DEBUG [RS:0;3456ee6a3164:36635 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:46:06,038 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,038 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,039 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,039 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,039 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,039 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,36635,1731735964677-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:46:06,056 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:46:06,057 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,36635,1731735964677-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,058 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,058 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.Replication(171): 3456ee6a3164,36635,1731735964677 started 2024-11-16T05:46:06,073 WARN [3456ee6a3164:43225 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T05:46:06,074 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
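Annotation: among the region-server startup entries above, the PressureAwareCompactionThroughputController reports a 100 MB/s upper and 50 MB/s lower compaction throughput bound with a 60000 ms tuning period. A sketch of overriding those bounds is shown below; the property names (hbase.hstore.compaction.throughput.*) are the commonly documented keys for this controller and are an assumption, since the log prints only the effective values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Bounds are in bytes per second; 100 MB/s and 50 MB/s match the
        // values reported in the log.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }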
2024-11-16T05:46:06,075 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,36635,1731735964677, RpcServer on 3456ee6a3164/172.17.0.2:36635, sessionid=0x100471121b10001 2024-11-16T05:46:06,075 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:46:06,075 DEBUG [RS:0;3456ee6a3164:36635 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,36635,1731735964677 2024-11-16T05:46:06,076 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,36635,1731735964677' 2024-11-16T05:46:06,076 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:46:06,077 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:46:06,077 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:46:06,078 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:46:06,078 DEBUG [RS:0;3456ee6a3164:36635 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,36635,1731735964677 2024-11-16T05:46:06,078 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,36635,1731735964677' 2024-11-16T05:46:06,078 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:46:06,078 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:46:06,079 DEBUG [RS:0;3456ee6a3164:36635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:46:06,079 INFO [RS:0;3456ee6a3164:36635 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:46:06,079 INFO [RS:0;3456ee6a3164:36635 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-16T05:46:06,190 INFO [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C36635%2C1731735964677, suffix=, logDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677, archiveDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs, maxLogs=32 2024-11-16T05:46:06,193 INFO [RS:0;3456ee6a3164:36635 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731735966193 2024-11-16T05:46:06,202 INFO [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735966193 2024-11-16T05:46:06,203 DEBUG [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:46:06,328 DEBUG [3456ee6a3164:43225 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:46:06,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,36635,1731735964677 2024-11-16T05:46:06,348 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,36635,1731735964677, state=OPENING 2024-11-16T05:46:06,353 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:46:06,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:06,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:46:06,356 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:46:06,356 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:46:06,358 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:46:06,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,36635,1731735964677}] 2024-11-16T05:46:06,538 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:46:06,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55987, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:46:06,552 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:46:06,553 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:46:06,557 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C36635%2C1731735964677.meta, suffix=.meta, logDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677, archiveDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs, maxLogs=32 2024-11-16T05:46:06,559 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.meta.1731735966559.meta 2024-11-16T05:46:06,568 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.meta.1731735966559.meta 2024-11-16T05:46:06,569 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43479:43479),(127.0.0.1/127.0.0.1:33489:33489)] 2024-11-16T05:46:06,570 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:46:06,571 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:46:06,574 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:46:06,578 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
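The WAL parameters reported above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, for both the default and the meta WAL) come from region server configuration rather than from the test table itself. A minimal sketch of the corresponding settings, assuming the usual property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs); exact keys and defaults can differ between HBase versions:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walConf() {
    Configuration conf = HBaseConfiguration.create();
    // Block size used for new WAL files; the log above reports 256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll the WAL once it reaches blocksize * multiplier (128 MB rollsize above).
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on retained WAL files before flushes are forced (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```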
2024-11-16T05:46:06,582 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:46:06,582 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:46:06,583 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:46:06,583 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:46:06,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:46:06,587 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:46:06,587 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:06,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:06,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:46:06,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:46:06,590 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:06,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:06,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:46:06,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:46:06,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:06,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:46:06,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:46:06,595 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:46:06,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:06,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
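Each store opener above prints the same CompactionConfiguration summary (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, ExploringCompactionPolicy). Those values map to cluster-level compaction settings; the sketch below uses the standard hbase.hstore.compaction.* keys as an assumption about where they come from, not a dump of this cluster's actual hbase-site.xml:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum / maximum number of store files selected per minor compaction
    // (minFilesToCompact:3, maxFilesToCompact:10 in the store opener lines above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratio used by ExploringCompactionPolicy (ratio 1.200000 above).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Files below this size are always compaction candidates (minCompactSize:128 MB above).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    return conf;
  }
}
```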
2024-11-16T05:46:06,597 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:46:06,598 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740 2024-11-16T05:46:06,601 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740 2024-11-16T05:46:06,603 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:46:06,604 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:46:06,604 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:46:06,607 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:46:06,609 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826884, jitterRate=0.05143827199935913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:46:06,609 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:46:06,610 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731735966583Writing region info on filesystem at 1731735966583Initializing all the Stores at 1731735966585 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735966585Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735966585Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731735966585Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731735966585Cleaning up temporary data from old regions at 1731735966604 (+19 ms)Running coprocessor post-open hooks at 1731735966609 (+5 ms)Region opened successfully at 1731735966610 (+1 ms) 2024-11-16T05:46:06,616 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731735966530 2024-11-16T05:46:06,627 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:46:06,628 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:46:06,630 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,36635,1731735964677 2024-11-16T05:46:06,633 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,36635,1731735964677, state=OPEN 2024-11-16T05:46:06,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:46:06,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:46:06,639 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:46:06,639 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:46:06,639 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,36635,1731735964677 2024-11-16T05:46:06,645 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:46:06,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,36635,1731735964677 in 280 msec 2024-11-16T05:46:06,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:46:06,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 737 msec 2024-11-16T05:46:06,656 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:46:06,656 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:46:06,674 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:46:06,675 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,36635,1731735964677, seqNum=-1] 2024-11-16T05:46:06,696 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:46:06,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37437, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:46:06,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 993 msec 2024-11-16T05:46:06,719 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731735966719, completionTime=-1 2024-11-16T05:46:06,721 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:46:06,722 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:46:06,746 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:46:06,746 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736026746 2024-11-16T05:46:06,746 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736086746 2024-11-16T05:46:06,746 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 24 msec 2024-11-16T05:46:06,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43225,1731735963837-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,750 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43225,1731735963837-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,750 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43225,1731735963837-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,752 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:43225, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T05:46:06,752 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,753 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:46:06,758 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:46:06,779 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.908sec 2024-11-16T05:46:06,780 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:46:06,781 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:46:06,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:46:06,783 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:46:06,783 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:46:06,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43225,1731735963837-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:46:06,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43225,1731735963837-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:46:06,792 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:46:06,793 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:46:06,794 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43225,1731735963837-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T05:46:06,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787d320a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:46:06,827 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-16T05:46:06,827 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-16T05:46:06,830 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,43225,-1 for getting cluster id 2024-11-16T05:46:06,833 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:46:06,840 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b20453a3-5349-40b5-8c29-f73915424e44' 2024-11-16T05:46:06,843 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:46:06,843 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b20453a3-5349-40b5-8c29-f73915424e44" 2024-11-16T05:46:06,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6edca4ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:46:06,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,43225,-1] 2024-11-16T05:46:06,848 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T05:46:06,850 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:46:06,851 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:46:06,854 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@791a874b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:46:06,855 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:46:06,863 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,36635,1731735964677, seqNum=-1] 2024-11-16T05:46:06,864 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:46:06,866 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:46:06,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=3456ee6a3164,43225,1731735963837 2024-11-16T05:46:06,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:46:06,892 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:46:06,896 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T05:46:06,901 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,43225,1731735963837 2024-11-16T05:46:06,918 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52246f65 2024-11-16T05:46:06,919 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T05:46:06,922 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T05:46:06,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T05:46:06,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
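The two TableDescriptorChecker warnings are expected in this run: the test deliberately shrinks the flush and split thresholds so that WAL rolling and memstore flushing happen after only a few kilobytes of writes. A hedged sketch of setting those values on the test configuration before the minicluster starts, using the property names quoted in the warnings themselves (the surrounding test scaffolding is assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionConfSketch {
  public static Configuration tinyRegionConf() {
    Configuration conf = HBaseConfiguration.create();
    // Values reported in the warnings above: an 8 KB memstore flush size and a
    // ~768 KB max region size, both far below production defaults, so the test
    // can exercise flushing and log rolling without writing much data.
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    return conf;
  }
}
```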
2024-11-16T05:46:06,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:46:06,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-16T05:46:06,944 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T05:46:06,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-16T05:46:06,951 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:06,954 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T05:46:06,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:46:06,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741835_1011 (size=389) 2024-11-16T05:46:06,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741835_1011 (size=389) 2024-11-16T05:46:07,000 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3f35f251583598c42a345289a5f9aa61, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2 2024-11-16T05:46:07,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741836_1012 (size=72) 2024-11-16T05:46:07,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741836_1012 (size=72) 2024-11-16T05:46:07,012 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:46:07,012 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3f35f251583598c42a345289a5f9aa61, disabling compactions & flushes 2024-11-16T05:46:07,012 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:46:07,013 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:46:07,013 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. after waiting 0 ms 2024-11-16T05:46:07,013 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:46:07,013 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:46:07,013 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3f35f251583598c42a345289a5f9aa61: Waiting for close lock at 1731735967012Disabling compacts and flushes for region at 1731735967012Disabling writes for close at 1731735967013 (+1 ms)Writing region close event to WAL at 1731735967013Closed at 1731735967013 2024-11-16T05:46:07,015 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T05:46:07,020 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731735967015"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731735967015"}]},"ts":"1731735967015"} 2024-11-16T05:46:07,027 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
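The CreateTableProcedure above corresponds to a client-side createTable call with a single 'info' family, one version, and a ROW bloom filter, exactly as echoed in the HMaster line. The test's own code is not part of this log; a minimal client sketch with the Connection/Admin wiring assumed:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Mirrors the descriptor echoed by HMaster: one 'info' family, 1 version, ROW bloom filter.
  public static void createTestTable(Admin admin) throws IOException {
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    admin.createTable(td);
  }
}
```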
2024-11-16T05:46:07,029 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T05:46:07,031 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731735967029"}]},"ts":"1731735967029"} 2024-11-16T05:46:07,035 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-16T05:46:07,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3f35f251583598c42a345289a5f9aa61, ASSIGN}] 2024-11-16T05:46:07,040 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3f35f251583598c42a345289a5f9aa61, ASSIGN 2024-11-16T05:46:07,041 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3f35f251583598c42a345289a5f9aa61, ASSIGN; state=OFFLINE, location=3456ee6a3164,36635,1731735964677; forceNewPlan=false, retain=false 2024-11-16T05:46:07,195 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3f35f251583598c42a345289a5f9aa61, regionState=OPENING, regionLocation=3456ee6a3164,36635,1731735964677 2024-11-16T05:46:07,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3f35f251583598c42a345289a5f9aa61, ASSIGN because future has completed 2024-11-16T05:46:07,204 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f35f251583598c42a345289a5f9aa61, server=3456ee6a3164,36635,1731735964677}] 2024-11-16T05:46:07,368 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 
2024-11-16T05:46:07,368 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f35f251583598c42a345289a5f9aa61, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:46:07,369 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,369 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:46:07,370 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,370 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,373 INFO [StoreOpener-3f35f251583598c42a345289a5f9aa61-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,376 INFO [StoreOpener-3f35f251583598c42a345289a5f9aa61-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f35f251583598c42a345289a5f9aa61 columnFamilyName info 2024-11-16T05:46:07,376 DEBUG [StoreOpener-3f35f251583598c42a345289a5f9aa61-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:46:07,377 INFO [StoreOpener-3f35f251583598c42a345289a5f9aa61-1 {}] regionserver.HStore(327): Store=3f35f251583598c42a345289a5f9aa61/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:46:07,377 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,379 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,380 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,381 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,381 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,384 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,388 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:46:07,389 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3f35f251583598c42a345289a5f9aa61; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714929, jitterRate=-0.09092168509960175}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:46:07,389 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:07,390 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3f35f251583598c42a345289a5f9aa61: Running coprocessor pre-open hook at 1731735967370Writing region info on filesystem at 1731735967370Initializing all the Stores at 1731735967373 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731735967373Cleaning up temporary data from old regions at 1731735967381 (+8 ms)Running coprocessor post-open hooks at 1731735967389 (+8 ms)Region opened successfully at 1731735967390 (+1 ms) 2024-11-16T05:46:07,392 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61., pid=6, masterSystemTime=1731735967358 2024-11-16T05:46:07,395 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:46:07,395 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:46:07,397 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3f35f251583598c42a345289a5f9aa61, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,36635,1731735964677 2024-11-16T05:46:07,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f35f251583598c42a345289a5f9aa61, server=3456ee6a3164,36635,1731735964677 because future has completed 2024-11-16T05:46:07,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T05:46:07,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3f35f251583598c42a345289a5f9aa61, server=3456ee6a3164,36635,1731735964677 in 200 msec 2024-11-16T05:46:07,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T05:46:07,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3f35f251583598c42a345289a5f9aa61, ASSIGN in 372 msec 2024-11-16T05:46:07,418 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T05:46:07,418 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731735967418"}]},"ts":"1731735967418"} 2024-11-16T05:46:07,422 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-16T05:46:07,424 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T05:46:07,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 490 msec 2024-11-16T05:46:12,092 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-16T05:46:12,147 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T05:46:12,149 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-16T05:46:14,306 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T05:46:14,307 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T05:46:14,312 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T05:46:14,312 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T05:46:14,313 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:46:14,314 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T05:46:14,314 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T05:46:14,314 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T05:46:17,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43225 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:46:17,051 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-16T05:46:17,055 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-16T05:46:17,064 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-16T05:46:17,065 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 
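Once the CREATE operation completes, the test scans hbase:meta and finds the table's single region before writing to it. The same lookup can be done through a RegionLocator; a small sketch assuming a Connection to this minicluster is already open:

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLookupSketch {
  // Lists the region(s) of the test table; the log above reports exactly one.
  public static List<HRegionLocation> locate(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      return locator.getAllRegionLocations();
    }
  }
}
```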
2024-11-16T05:46:17,066 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731735977066 2024-11-16T05:46:17,076 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:17,076 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:17,076 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:17,076 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:17,076 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:17,077 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735966193 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735977066 2024-11-16T05:46:17,078 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43479:43479),(127.0.0.1/127.0.0.1:33489:33489)] 2024-11-16T05:46:17,078 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735966193 is not closed yet, will try archiving it next time 2024-11-16T05:46:17,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741833_1009 (size=451) 2024-11-16T05:46:17,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741833_1009 (size=451) 2024-11-16T05:46:17,081 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735966193 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731735966193 2024-11-16T05:46:17,087 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61., hostname=3456ee6a3164,36635,1731735964677, seqNum=2] 2024-11-16T05:46:29,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36635 {}] regionserver.HRegion(8855): Flush requested on 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:29,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3f35f251583598c42a345289a5f9aa61 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:46:29,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/f38ee0b62baa4776b9ba9509409d5881 is 1080, key is row0001/info:/1731735977090/Put/seqid=0 2024-11-16T05:46:29,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741838_1014 (size=12509) 2024-11-16T05:46:29,197 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741838_1014 (size=12509) 2024-11-16T05:46:29,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/f38ee0b62baa4776b9ba9509409d5881 2024-11-16T05:46:29,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/f38ee0b62baa4776b9ba9509409d5881 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881 2024-11-16T05:46:29,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T05:46:29,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3f35f251583598c42a345289a5f9aa61 in 133ms, sequenceid=11, compaction requested=false 2024-11-16T05:46:29,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3f35f251583598c42a345289a5f9aa61: 2024-11-16T05:46:32,864 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
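
The flush recorded above follows a write-to-temp-then-commit pattern: the memstore snapshot is first written under the region's .tmp directory and only then moved into the column family directory (info/), where it becomes a visible store file. Below is a minimal sketch of that pattern using plain java.nio.file on a local filesystem; the class name, method names and file contents are invented for illustration, and this is not the HRegionFileSystem API itself.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustrative only: mirrors the ".tmp then commit" sequence visible in the
// flush log above, using local files instead of HDFS and HRegionFileSystem.
public class TmpThenCommitFlush {

    // Write the flushed data to <region>/.tmp/<file>, then move it into
    // <region>/<family>/<file> so readers only ever see a complete file.
    static Path flushAndCommit(Path regionDir, String family, String fileName, byte[] data)
            throws IOException {
        Path tmpDir = regionDir.resolve(".tmp");
        Path familyDir = regionDir.resolve(family);
        Files.createDirectories(tmpDir);
        Files.createDirectories(familyDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, data);                      // "Flushed memstore ... to=.../.tmp/..."

        Path committed = familyDir.resolve(fileName);    // "Committing .../.tmp/... as .../info/..."
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("region-sketch");
        Path storeFile = flushAndCommit(region, "info", "example-storefile",
                "row0001/info:/...".getBytes(StandardCharsets.UTF_8));
        System.out.println("Added " + storeFile);        // "Added .../info/<file>, entries=..., ..."
    }
}
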
2024-11-16T05:46:37,147 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731735997146 2024-11-16T05:46:37,365 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 213 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK], DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK]] 2024-11-16T05:46:37,366 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:37,366 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:37,366 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:37,366 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:37,367 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:37,367 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735977066 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735997146 2024-11-16T05:46:37,368 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:46:37,368 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735977066 is not closed yet, will try archiving it next time 2024-11-16T05:46:37,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741837_1013 (size=12399) 2024-11-16T05:46:37,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741837_1013 (size=12399) 2024-11-16T05:46:37,571 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:39,775 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:41,983 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:44,191 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:44,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36635 {}] regionserver.HRegion(8855): Flush requested on 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:46:44,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3f35f251583598c42a345289a5f9aa61 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:46:44,395 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:44,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/ff19436c3e7546d8b6337392085a5911 is 1080, key is row0008/info:/1731735991128/Put/seqid=0 2024-11-16T05:46:44,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741840_1016 (size=12509) 2024-11-16T05:46:44,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741840_1016 (size=12509) 2024-11-16T05:46:44,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/ff19436c3e7546d8b6337392085a5911 2024-11-16T05:46:44,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/ff19436c3e7546d8b6337392085a5911 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/ff19436c3e7546d8b6337392085a5911 2024-11-16T05:46:44,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/ff19436c3e7546d8b6337392085a5911, entries=7, sequenceid=21, filesize=12.2 K 2024-11-16T05:46:44,638 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:44,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3f35f251583598c42a345289a5f9aa61 in 
447ms, sequenceid=21, compaction requested=false 2024-11-16T05:46:44,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3f35f251583598c42a345289a5f9aa61: 2024-11-16T05:46:44,640 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-16T05:46:44,640 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:46:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881 because midkey is the same as first or last row 2024-11-16T05:46:46,399 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:46,795 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T05:46:46,795 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T05:46:48,607 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:48,612 WARN [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:48,613 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C36635%2C1731735964677:(num 1731735997146) roll requested 2024-11-16T05:46:48,614 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731736008614 2024-11-16T05:46:48,824 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:48,824 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:48,825 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:48,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:48,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:46:48,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
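
The roll requested above is count-based: individual syncs in the 200 ms range are only logged as slow, but once their count since the last roll exceeds the threshold (count=8, threshold=5 in the log), a roll is requested; a separate, larger per-sync threshold (5000 ms, seen later in the log) triggers a roll on its own. A simplified, hypothetical tracker illustrating both triggers is sketched below; the 100 ms slow-sync floor and the check-on-every-sync shape are assumptions for the example, and the real WAL evaluates the accumulated count periodically rather than per sync.

// Illustrative sketch of the two roll triggers visible in the log: many
// moderately slow syncs (count over a threshold) or one very slow sync
// (time over a larger threshold). The count and time limits mirror the
// values printed above; the 100 ms floor is assumed.
public class SlowSyncRollTracker {
    private final long slowSyncMillis;         // a sync slower than this counts as "slow" (assumed)
    private final int slowSyncCountLimit;      // "count=8, threshold=5" in the log
    private final long rollImmediatelyMillis;  // "time=5007 ms, threshold=5000 ms" in the log
    private int slowSyncCount;

    public SlowSyncRollTracker(long slowSyncMillis, int slowSyncCountLimit, long rollImmediatelyMillis) {
        this.slowSyncMillis = slowSyncMillis;
        this.slowSyncCountLimit = slowSyncCountLimit;
        this.rollImmediatelyMillis = rollImmediatelyMillis;
    }

    /** Returns true if a WAL roll should be requested after a sync that took syncMillis. */
    public boolean recordSync(long syncMillis) {
        if (syncMillis >= rollImmediatelyMillis) {
            slowSyncCount = 0;
            return true;                       // a single sync blew the hard limit
        }
        if (syncMillis >= slowSyncMillis && ++slowSyncCount > slowSyncCountLimit) {
            slowSyncCount = 0;
            return true;                       // too many slow syncs since the last roll
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncRollTracker tracker = new SlowSyncRollTracker(100, 5, 5000);
        long[] costs = {201, 202, 201, 202, 202, 202, 208, 203}; // sync costs as logged above
        for (long cost : costs) {
            if (tracker.recordSync(cost)) {
                System.out.println("Requesting log roll after slow sync of " + cost + " ms");
            }
        }
        System.out.println("Roll on one very slow sync: " + tracker.recordSync(5007));
    }
}
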
2024-11-16T05:46:48,825 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735997146 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736008614 2024-11-16T05:46:48,826 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:46:48,826 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735997146 is not closed yet, will try archiving it next time 2024-11-16T05:46:48,826 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735977066 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731735977066 2024-11-16T05:46:48,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741839_1015 (size=7739) 2024-11-16T05:46:48,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741839_1015 (size=7739) 2024-11-16T05:46:50,814 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:52,369 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3f35f251583598c42a345289a5f9aa61, had cached 0 bytes from a total of 25018 2024-11-16T05:46:53,023 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:55,232 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:57,441 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], 
DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:46:59,444 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T05:46:59,444 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731736019444 2024-11-16T05:47:02,864 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T05:47:04,456 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:04,458 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:04,458 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C36635%2C1731735964677:(num 1731736019444) roll requested 2024-11-16T05:47:04,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:04,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:04,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:04,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:04,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:04,460 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736008614 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736019444 2024-11-16T05:47:04,461 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:47:04,461 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736008614 is not closed yet, will try archiving it next time 2024-11-16T05:47:04,462 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731736024461 2024-11-16T05:47:04,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741841_1017 (size=4753) 2024-11-16T05:47:04,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741841_1017 (size=4753) 2024-11-16T05:47:09,466 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:09,467 WARN [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36635 {}] regionserver.HRegion(8855): Flush requested on 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:47:09,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3f35f251583598c42a345289a5f9aa61 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:47:09,476 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:09,476 WARN [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:11,469 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T05:47:14,472 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:14,472 WARN [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:14,473 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:14,473 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:14,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:14,475 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:14,475 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:14,476 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736019444 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736024461 2024-11-16T05:47:14,478 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:47:14,478 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736019444 is not closed yet, will try archiving it next time 2024-11-16T05:47:14,478 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C36635%2C1731735964677:(num 1731736024461) roll requested 2024-11-16T05:47:14,479 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731736034478 2024-11-16T05:47:14,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741842_1018 (size=1569) 2024-11-16T05:47:14,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741842_1018 (size=1569) 2024-11-16T05:47:14,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/cd4d01eef67547c485aada76746a94f0 is 1080, key is row0015/info:/1731736006196/Put/seqid=0 2024-11-16T05:47:14,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741844_1020 (size=12509) 2024-11-16T05:47:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741844_1020 (size=12509) 2024-11-16T05:47:14,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/cd4d01eef67547c485aada76746a94f0 2024-11-16T05:47:14,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/cd4d01eef67547c485aada76746a94f0 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/cd4d01eef67547c485aada76746a94f0 2024-11-16T05:47:14,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/cd4d01eef67547c485aada76746a94f0, entries=7, sequenceid=31, filesize=12.2 K 2024-11-16T05:47:19,493 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:19,493 WARN [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:19,510 INFO [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:19,510 WARN [FSHLog-0-hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2-prefix:3456ee6a3164,36635,1731735964677 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37001,DS-a60706d9-c967-4b0b-807e-ab71279d41dc,DISK], DatanodeInfoWithStorage[127.0.0.1:39803,DS-4ba8fc81-4ccf-4411-993b-67e94ea76f3e,DISK]] 2024-11-16T05:47:19,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3f35f251583598c42a345289a5f9aa61 in 10043ms, sequenceid=31, compaction requested=true 2024-11-16T05:47:19,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3f35f251583598c42a345289a5f9aa61: 2024-11-16T05:47:19,511 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,511 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-16T05:47:19,511 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,511 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:47:19,511 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881 because midkey is the same as first or last row 2024-11-16T05:47:19,511 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,511 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736024461 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736034478 2024-11-16T05:47:19,512 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:47:19,512 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736024461 is not closed yet, will try archiving it next time 2024-11-16T05:47:19,512 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731735997146 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731735997146 2024-11-16T05:47:19,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f35f251583598c42a345289a5f9aa61:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:47:19,512 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C36635%2C1731735964677:(num 1731736039512) roll requested 2024-11-16T05:47:19,513 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731736039512 2024-11-16T05:47:19,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741843_1019 (size=438) 2024-11-16T05:47:19,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741843_1019 (size=438) 2024-11-16T05:47:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:47:19,515 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:47:19,515 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736008614 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731736008614 2024-11-16T05:47:19,517 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736019444 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731736019444 2024-11-16T05:47:19,519 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:47:19,519 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736024461 to 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731736024461 2024-11-16T05:47:19,520 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.HStore(1541): 3f35f251583598c42a345289a5f9aa61/info is initiating minor compaction (all files) 2024-11-16T05:47:19,521 INFO [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3f35f251583598c42a345289a5f9aa61/info in TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:47:19,521 INFO [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/ff19436c3e7546d8b6337392085a5911, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/cd4d01eef67547c485aada76746a94f0] into tmpdir=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp, totalSize=36.6 K 2024-11-16T05:47:19,522 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] compactions.Compactor(225): Compacting f38ee0b62baa4776b9ba9509409d5881, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731735977090 2024-11-16T05:47:19,523 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff19436c3e7546d8b6337392085a5911, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731735991128 2024-11-16T05:47:19,524 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd4d01eef67547c485aada76746a94f0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731736006196 2024-11-16T05:47:19,525 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,525 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,525 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,526 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736034478 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736039512 2024-11-16T05:47:19,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741845_1021 (size=93) 2024-11-16T05:47:19,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741845_1021 (size=93) 2024-11-16T05:47:19,533 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33489:33489),(127.0.0.1/127.0.0.1:43479:43479)] 2024-11-16T05:47:19,534 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736034478 is not closed yet, will try archiving it next time 2024-11-16T05:47:19,534 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C36635%2C1731735964677.1731736039534 2024-11-16T05:47:19,541 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,541 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,541 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,541 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,542 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:19,542 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736039512 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736039534 2024-11-16T05:47:19,543 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43479:43479),(127.0.0.1/127.0.0.1:33489:33489)] 2024-11-16T05:47:19,543 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736034478 is not closed yet, will try archiving it next time 2024-11-16T05:47:19,543 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736039512 is not closed yet, will try archiving it next time 2024-11-16T05:47:19,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741846_1022 (size=1258) 2024-11-16T05:47:19,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741846_1022 (size=1258) 2024-11-16T05:47:19,545 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736034478 is not closed yet, will try archiving it next time 2024-11-16T05:47:19,556 INFO [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f35f251583598c42a345289a5f9aa61#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:47:19,557 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/7a583b395fa3408fa10daf0f6d0029b6 is 1080, key is row0001/info:/1731735977090/Put/seqid=0 2024-11-16T05:47:19,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741848_1024 (size=27710) 2024-11-16T05:47:19,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741848_1024 (size=27710) 2024-11-16T05:47:19,576 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/7a583b395fa3408fa10daf0f6d0029b6 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/7a583b395fa3408fa10daf0f6d0029b6 2024-11-16T05:47:19,592 INFO [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3f35f251583598c42a345289a5f9aa61/info of 3f35f251583598c42a345289a5f9aa61 into 7a583b395fa3408fa10daf0f6d0029b6(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:47:19,592 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3f35f251583598c42a345289a5f9aa61: 2024-11-16T05:47:19,593 INFO [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61., storeName=3f35f251583598c42a345289a5f9aa61/info, priority=13, startTime=1731736039512; duration=0sec 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/7a583b395fa3408fa10daf0f6d0029b6 because midkey is the same as first or last row 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/7a583b395fa3408fa10daf0f6d0029b6 because midkey is the same as first or last row 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/7a583b395fa3408fa10daf0f6d0029b6 because midkey is the same as first or last row 2024-11-16T05:47:19,594 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:47:19,595 DEBUG [RS:0;3456ee6a3164:36635-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f35f251583598c42a345289a5f9aa61:info 2024-11-16T05:47:19,930 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/WALs/3456ee6a3164,36635,1731735964677/3456ee6a3164%2C36635%2C1731735964677.1731736034478 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs/3456ee6a3164%2C36635%2C1731735964677.1731736034478 2024-11-16T05:47:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36635 {}] regionserver.HRegion(8855): Flush requested on 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:47:31,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3f35f251583598c42a345289a5f9aa61 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:47:31,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/d11d8c935eb44fc58b5527059422122f is 1080, key is row0022/info:/1731736039535/Put/seqid=0 2024-11-16T05:47:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741849_1025 (size=12509) 2024-11-16T05:47:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741849_1025 (size=12509) 2024-11-16T05:47:31,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/d11d8c935eb44fc58b5527059422122f 2024-11-16T05:47:31,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/d11d8c935eb44fc58b5527059422122f as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/d11d8c935eb44fc58b5527059422122f 2024-11-16T05:47:31,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/d11d8c935eb44fc58b5527059422122f, entries=7, sequenceid=42, filesize=12.2 K 2024-11-16T05:47:31,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3f35f251583598c42a345289a5f9aa61 in 36ms, sequenceid=42, compaction requested=false 2024-11-16T05:47:31,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3f35f251583598c42a345289a5f9aa61: 2024-11-16T05:47:31,606 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-16T05:47:31,606 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:47:31,606 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/7a583b395fa3408fa10daf0f6d0029b6 because midkey is the same as first or last row 2024-11-16T05:47:32,865 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T05:47:37,370 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3f35f251583598c42a345289a5f9aa61, had cached 0 bytes from a total of 40219 2024-11-16T05:47:39,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:47:39,593 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
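
The split check above is two-staged: the size test passes (sumSize=39.3 K against sizeToCheck=16.0 K), but the split is still refused because the chosen midkey equals the file's first or last row, which would leave one daughter region empty. A small illustrative sketch of that decision follows; it is not the actual ConstantSizeRegionSplitPolicy or StoreUtils code, and the row values are invented for the example.

import java.util.Arrays;

// Illustrative sketch of the split decision logged above: size says "split",
// the midkey check says "no usable split point".
public class SplitDecisionSketch {

    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;   // "Should split because region size is big enough"
    }

    static boolean canSplitAt(byte[] firstRow, byte[] midKey, byte[] lastRow) {
        // "cannot split ... because midkey is the same as first or last row"
        return !Arrays.equals(midKey, firstRow) && !Arrays.equals(midKey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 40 * 1024;       // roughly the 39.3 K in the log
        long sizeToCheck = 16 * 1024;   // 16.0 K
        byte[] first = "row0001".getBytes();
        byte[] last = "row0035".getBytes();
        byte[] midKey = first;          // small file: the midkey falls on the first row

        boolean sizeOk = shouldSplit(sumSize, sizeToCheck);
        boolean midkeyOk = canSplitAt(first, midKey, last);
        if (sizeOk && midkeyOk) {
            System.out.println("Requesting region split");
        } else {
            System.out.println("Split skipped: size ok=" + sizeOk + ", usable midkey=" + midkeyOk);
        }
    }
}
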
2024-11-16T05:47:39,593 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:47:39,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:39,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:39,602 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
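
The call stack above comes from the test's teardown, which shuts the minicluster down through HBaseTestingUtil.shutdownMiniCluster(). A hypothetical JUnit 4 skeleton showing that lifecycle is sketched below; the TEST_UTIL field name and the @Before/@After shape are assumptions, and only the HBaseTestingUtil class and shutdownMiniCluster() are confirmed by the stack trace.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Hypothetical, minimal test skeleton matching the teardown path in the call
// stack above (tearDown -> HBaseTestingUtil.shutdownMiniCluster).
public class MiniClusterLifecycleSketch {

    private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        TEST_UTIL.startMiniCluster();    // boots mini DFS, ZooKeeper, master and a region server
    }

    @After
    public void tearDown() throws Exception {
        // Closes the async connection and stops the cluster, producing the
        // "Shutting down minicluster" sequence seen in the log.
        TEST_UTIL.shutdownMiniCluster();
    }
}
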
2024-11-16T05:47:39,603 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:47:39,603 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1496649153, stopped=false 2024-11-16T05:47:39,603 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,43225,1731735963837 2024-11-16T05:47:39,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:39,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:39,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:39,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:39,605 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:47:39,606 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:47:39,606 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:47:39,606 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:39,606 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:39,606 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:39,607 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,36635,1731735964677' ***** 2024-11-16T05:47:39,607 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:47:39,607 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:47:39,608 INFO [RS:0;3456ee6a3164:36635 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:47:39,608 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:47:39,608 INFO [RS:0;3456ee6a3164:36635 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:47:39,608 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(3091): Received CLOSE for 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:47:39,608 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,36635,1731735964677 2024-11-16T05:47:39,608 INFO [RS:0;3456ee6a3164:36635 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:47:39,608 INFO [RS:0;3456ee6a3164:36635 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:36635. 
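
The shutdown is propagated through ZooKeeper: deleting the /hbase/running znode produces the NodeDeleted events logged above, which master and region server interpret as a cluster-wide stop request. Below is an illustrative watcher using the plain ZooKeeper client API, not HBase's ZKWatcher; the class name and the latch are invented for the sketch.

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

// Illustrative only: shows how deletion of the /hbase/running znode (the
// event logged above) can be read as the "cluster shutdown requested" signal.
public class RunningNodeWatcherSketch implements Watcher {

    private final String runningZNode = "/hbase/running";   // baseZNode=/hbase in the log
    private final CountDownLatch shutdownRequested = new CountDownLatch(1);

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted
                && runningZNode.equals(event.getPath())) {
            System.out.println("Cluster shutdown requested (" + runningZNode + " deleted)");
            shutdownRequested.countDown();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        RunningNodeWatcherSketch watcher = new RunningNodeWatcherSketch();
        // Simulate the event the real watcher received from the quorum.
        watcher.process(new WatchedEvent(
                Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, "/hbase/running"));
        watcher.shutdownRequested.await();   // returns immediately once the latch is released
    }
}
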
2024-11-16T05:47:39,608 DEBUG [RS:0;3456ee6a3164:36635 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:47:39,609 DEBUG [RS:0;3456ee6a3164:36635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3f35f251583598c42a345289a5f9aa61, disabling compactions & flushes 2024-11-16T05:47:39,609 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T05:47:39,609 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:47:39,609 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:47:39,609 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:47:39,609 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. after waiting 0 ms 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 
2024-11-16T05:47:39,609 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 3f35f251583598c42a345289a5f9aa61 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T05:47:39,609 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:47:39,609 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1325): Online Regions={3f35f251583598c42a345289a5f9aa61=TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T05:47:39,609 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:47:39,609 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:47:39,610 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:47:39,610 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3f35f251583598c42a345289a5f9aa61 2024-11-16T05:47:39,610 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-16T05:47:39,614 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/62a1a56a29164c518031bbac2fccec72 is 1080, key is row0029/info:/1731736053574/Put/seqid=0 2024-11-16T05:47:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741850_1026 (size=8193) 2024-11-16T05:47:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741850_1026 (size=8193) 2024-11-16T05:47:39,625 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/62a1a56a29164c518031bbac2fccec72 2024-11-16T05:47:39,634 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/.tmp/info/62a1a56a29164c518031bbac2fccec72 as 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/62a1a56a29164c518031bbac2fccec72 2024-11-16T05:47:39,636 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/info/d312cd9ce9564ad5a46c0096dd42f3fe is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61./info:regioninfo/1731735967396/Put/seqid=0 2024-11-16T05:47:39,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741851_1027 (size=7016) 2024-11-16T05:47:39,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741851_1027 (size=7016) 2024-11-16T05:47:39,644 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/info/d312cd9ce9564ad5a46c0096dd42f3fe 2024-11-16T05:47:39,645 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/62a1a56a29164c518031bbac2fccec72, entries=3, sequenceid=48, filesize=8.0 K 2024-11-16T05:47:39,646 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3f35f251583598c42a345289a5f9aa61 in 37ms, sequenceid=48, compaction requested=true 2024-11-16T05:47:39,647 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/ff19436c3e7546d8b6337392085a5911, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/cd4d01eef67547c485aada76746a94f0] to archive 2024-11-16T05:47:39,650 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T05:47:39,654 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/f38ee0b62baa4776b9ba9509409d5881 2024-11-16T05:47:39,655 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/ff19436c3e7546d8b6337392085a5911 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/ff19436c3e7546d8b6337392085a5911 2024-11-16T05:47:39,657 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/cd4d01eef67547c485aada76746a94f0 to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/info/cd4d01eef67547c485aada76746a94f0 2024-11-16T05:47:39,668 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/ns/b2bbbb4474964f72ae50fcf5f9032c18 is 43, key is default/ns:d/1731735966703/Put/seqid=0 2024-11-16T05:47:39,668 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3456ee6a3164:43225 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-16T05:47:39,669 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f38ee0b62baa4776b9ba9509409d5881=12509, ff19436c3e7546d8b6337392085a5911=12509, cd4d01eef67547c485aada76746a94f0=12509] 2024-11-16T05:47:39,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741852_1028 (size=5153) 2024-11-16T05:47:39,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741852_1028 (size=5153) 2024-11-16T05:47:39,674 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/ns/b2bbbb4474964f72ae50fcf5f9032c18 2024-11-16T05:47:39,674 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/default/TestLogRolling-testSlowSyncLogRolling/3f35f251583598c42a345289a5f9aa61/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-16T05:47:39,676 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 2024-11-16T05:47:39,677 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3f35f251583598c42a345289a5f9aa61: Waiting for close lock at 1731736059608Running coprocessor pre-close hooks at 1731736059609 (+1 ms)Disabling compacts and flushes for region at 1731736059609Disabling writes for close at 1731736059609Obtaining lock to block concurrent updates at 1731736059609Preparing flush snapshotting stores in 3f35f251583598c42a345289a5f9aa61 at 1731736059609Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731736059610 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. at 1731736059610Flushing 3f35f251583598c42a345289a5f9aa61/info: creating writer at 1731736059610Flushing 3f35f251583598c42a345289a5f9aa61/info: appending metadata at 1731736059614 (+4 ms)Flushing 3f35f251583598c42a345289a5f9aa61/info: closing flushed file at 1731736059614Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7edeaf7e: reopening flushed file at 1731736059633 (+19 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3f35f251583598c42a345289a5f9aa61 in 37ms, sequenceid=48, compaction requested=true at 1731736059647 (+14 ms)Writing region close event to WAL at 1731736059670 (+23 ms)Running coprocessor post-close hooks at 1731736059675 (+5 ms)Closed at 1731736059676 (+1 ms) 2024-11-16T05:47:39,677 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731735966924.3f35f251583598c42a345289a5f9aa61. 
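[Editorial note] The flush and close above persist client-written cells such as row0029 in column family info for table TestLogRolling-testSlowSyncLogRolling. As a hedged illustration only of the write path implied by those cells (the qualifier, value, and connection setup below are assumed and not taken from the test source), such rows are written with the standard HBase client Put API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(
                 TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row0029"));
          // Hypothetical qualifier and value; only the row key and family appear in the log.
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          table.put(put); // stays in the region's memstore until a flush writes an HFile
        }
      }
    }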
2024-11-16T05:47:39,697 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/table/6860d635d1c44fdda038b24815f30385 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731735967418/Put/seqid=0 2024-11-16T05:47:39,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741853_1029 (size=5396) 2024-11-16T05:47:39,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741853_1029 (size=5396) 2024-11-16T05:47:39,810 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T05:47:40,010 DEBUG [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T05:47:40,045 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:47:40,072 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T05:47:40,073 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T05:47:40,105 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/table/6860d635d1c44fdda038b24815f30385 2024-11-16T05:47:40,119 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/info/d312cd9ce9564ad5a46c0096dd42f3fe as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/info/d312cd9ce9564ad5a46c0096dd42f3fe 2024-11-16T05:47:40,128 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/info/d312cd9ce9564ad5a46c0096dd42f3fe, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T05:47:40,130 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/ns/b2bbbb4474964f72ae50fcf5f9032c18 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/ns/b2bbbb4474964f72ae50fcf5f9032c18 2024-11-16T05:47:40,137 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/ns/b2bbbb4474964f72ae50fcf5f9032c18, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T05:47:40,138 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/.tmp/table/6860d635d1c44fdda038b24815f30385 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/table/6860d635d1c44fdda038b24815f30385 2024-11-16T05:47:40,146 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/table/6860d635d1c44fdda038b24815f30385, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T05:47:40,147 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 537ms, sequenceid=11, compaction requested=false 2024-11-16T05:47:40,153 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T05:47:40,154 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:47:40,154 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:47:40,154 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736059609Running coprocessor pre-close hooks at 1731736059609Disabling compacts and flushes for region at 1731736059609Disabling writes for close at 1731736059609Obtaining lock to block concurrent updates at 1731736059610 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731736059610Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731736059610Flushing stores of hbase:meta,,1.1588230740 at 1731736059611 (+1 ms)Flushing 1588230740/info: creating writer at 1731736059611Flushing 1588230740/info: appending metadata at 1731736059635 (+24 ms)Flushing 1588230740/info: closing flushed file at 1731736059635Flushing 1588230740/ns: creating writer at 1731736059652 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731736059667 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731736059667Flushing 1588230740/table: creating writer at 1731736059682 (+15 ms)Flushing 1588230740/table: appending metadata at 1731736059696 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731736059696Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5aa67b3b: reopening flushed file at 1731736060118 (+422 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48b82f43: reopening flushed file at 1731736060129 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@277429f6: reopening flushed file at 1731736060137 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 537ms, sequenceid=11, compaction requested=false at 1731736060147 (+10 ms)Writing region close event to WAL at 
1731736060149 (+2 ms)Running coprocessor post-close hooks at 1731736060154 (+5 ms)Closed at 1731736060154 2024-11-16T05:47:40,154 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:47:40,211 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,36635,1731735964677; all regions closed. 2024-11-16T05:47:40,212 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,212 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,213 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,213 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,213 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741834_1010 (size=3066) 2024-11-16T05:47:40,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741834_1010 (size=3066) 2024-11-16T05:47:40,221 DEBUG [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs 2024-11-16T05:47:40,221 INFO [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C36635%2C1731735964677.meta:.meta(num 1731735966559) 2024-11-16T05:47:40,222 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,222 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741847_1023 (size=12695) 2024-11-16T05:47:40,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741847_1023 (size=12695) 2024-11-16T05:47:40,230 DEBUG [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/oldWALs 2024-11-16T05:47:40,230 INFO [RS:0;3456ee6a3164:36635 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C36635%2C1731735964677:(num 1731736039534) 2024-11-16T05:47:40,230 DEBUG [RS:0;3456ee6a3164:36635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:40,230 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:47:40,230 INFO [RS:0;3456ee6a3164:36635 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:47:40,230 INFO [RS:0;3456ee6a3164:36635 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T05:47:40,231 INFO [RS:0;3456ee6a3164:36635 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:47:40,231 INFO [regionserver/3456ee6a3164:0.logRoller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T05:47:40,231 INFO [RS:0;3456ee6a3164:36635 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36635 2024-11-16T05:47:40,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,36635,1731735964677 2024-11-16T05:47:40,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:47:40,236 INFO [RS:0;3456ee6a3164:36635 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:47:40,238 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,36635,1731735964677] 2024-11-16T05:47:40,240 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,36635,1731735964677 already deleted, retry=false 2024-11-16T05:47:40,241 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,36635,1731735964677 expired; onlineServers=0 2024-11-16T05:47:40,241 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,43225,1731735963837' ***** 2024-11-16T05:47:40,241 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:47:40,241 INFO [M:0;3456ee6a3164:43225 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:47:40,241 INFO [M:0;3456ee6a3164:43225 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:47:40,241 DEBUG [M:0;3456ee6a3164:43225 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:47:40,241 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T05:47:40,241 DEBUG [M:0;3456ee6a3164:43225 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:47:40,241 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731735965832 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731735965832,5,FailOnTimeoutGroup] 2024-11-16T05:47:40,241 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731735965830 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731735965830,5,FailOnTimeoutGroup] 2024-11-16T05:47:40,241 INFO [M:0;3456ee6a3164:43225 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:47:40,241 INFO [M:0;3456ee6a3164:43225 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:47:40,242 DEBUG [M:0;3456ee6a3164:43225 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:47:40,242 INFO [M:0;3456ee6a3164:43225 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:47:40,242 INFO [M:0;3456ee6a3164:43225 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:47:40,242 INFO [M:0;3456ee6a3164:43225 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:47:40,242 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:47:40,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:47:40,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:40,243 DEBUG [M:0;3456ee6a3164:43225 {}] zookeeper.ZKUtil(347): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T05:47:40,243 WARN [M:0;3456ee6a3164:43225 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T05:47:40,243 INFO [M:0;3456ee6a3164:43225 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/.lastflushedseqids 2024-11-16T05:47:40,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741854_1030 (size=130) 2024-11-16T05:47:40,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741854_1030 (size=130) 2024-11-16T05:47:40,254 INFO [M:0;3456ee6a3164:43225 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:47:40,254 INFO [M:0;3456ee6a3164:43225 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:47:40,255 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:47:40,255 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:40,255 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:40,255 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:47:40,255 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:40,255 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-16T05:47:40,272 DEBUG [M:0;3456ee6a3164:43225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cb790d7d2d242819fe0bc46473d7091 is 82, key is hbase:meta,,1/info:regioninfo/1731735966629/Put/seqid=0 2024-11-16T05:47:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741855_1031 (size=5672) 2024-11-16T05:47:40,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741855_1031 (size=5672) 2024-11-16T05:47:40,279 INFO [M:0;3456ee6a3164:43225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cb790d7d2d242819fe0bc46473d7091 2024-11-16T05:47:40,299 DEBUG [M:0;3456ee6a3164:43225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4e341b6f9d664835b030e2afd16b2a6c is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731735967427/Put/seqid=0 2024-11-16T05:47:40,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741856_1032 (size=6247) 2024-11-16T05:47:40,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741856_1032 (size=6247) 2024-11-16T05:47:40,305 INFO [M:0;3456ee6a3164:43225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4e341b6f9d664835b030e2afd16b2a6c 2024-11-16T05:47:40,311 INFO [M:0;3456ee6a3164:43225 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4e341b6f9d664835b030e2afd16b2a6c 2024-11-16T05:47:40,332 DEBUG [M:0;3456ee6a3164:43225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2bfdeb41e5a34d7880ce1e7c292ef9e6 is 69, key is 3456ee6a3164,36635,1731735964677/rs:state/1731735965969/Put/seqid=0 2024-11-16T05:47:40,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741857_1033 (size=5156) 2024-11-16T05:47:40,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741857_1033 (size=5156) 2024-11-16T05:47:40,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:40,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36635-0x100471121b10001, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:40,339 INFO [RS:0;3456ee6a3164:36635 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:47:40,339 INFO [RS:0;3456ee6a3164:36635 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,36635,1731735964677; zookeeper connection closed. 2024-11-16T05:47:40,339 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2240c514 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2240c514 2024-11-16T05:47:40,340 INFO [M:0;3456ee6a3164:43225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2bfdeb41e5a34d7880ce1e7c292ef9e6 2024-11-16T05:47:40,340 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T05:47:40,360 DEBUG [M:0;3456ee6a3164:43225 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ece3a23f77bf40d9b6c6cdafb39075ef is 52, key is load_balancer_on/state:d/1731735966889/Put/seqid=0 2024-11-16T05:47:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741858_1034 (size=5056) 2024-11-16T05:47:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741858_1034 (size=5056) 2024-11-16T05:47:40,368 INFO [M:0;3456ee6a3164:43225 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ece3a23f77bf40d9b6c6cdafb39075ef 2024-11-16T05:47:40,376 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cb790d7d2d242819fe0bc46473d7091 as 
hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4cb790d7d2d242819fe0bc46473d7091 2024-11-16T05:47:40,384 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4cb790d7d2d242819fe0bc46473d7091, entries=8, sequenceid=59, filesize=5.5 K 2024-11-16T05:47:40,386 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4e341b6f9d664835b030e2afd16b2a6c as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4e341b6f9d664835b030e2afd16b2a6c 2024-11-16T05:47:40,393 INFO [M:0;3456ee6a3164:43225 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4e341b6f9d664835b030e2afd16b2a6c 2024-11-16T05:47:40,394 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4e341b6f9d664835b030e2afd16b2a6c, entries=6, sequenceid=59, filesize=6.1 K 2024-11-16T05:47:40,395 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2bfdeb41e5a34d7880ce1e7c292ef9e6 as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2bfdeb41e5a34d7880ce1e7c292ef9e6 2024-11-16T05:47:40,402 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2bfdeb41e5a34d7880ce1e7c292ef9e6, entries=1, sequenceid=59, filesize=5.0 K 2024-11-16T05:47:40,404 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ece3a23f77bf40d9b6c6cdafb39075ef as hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ece3a23f77bf40d9b6c6cdafb39075ef 2024-11-16T05:47:40,411 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ece3a23f77bf40d9b6c6cdafb39075ef, entries=1, sequenceid=59, filesize=4.9 K 2024-11-16T05:47:40,412 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=59, compaction requested=false 2024-11-16T05:47:40,414 INFO [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T05:47:40,414 DEBUG [M:0;3456ee6a3164:43225 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736060255Disabling compacts and flushes for region at 1731736060255Disabling writes for close at 1731736060255Obtaining lock to block concurrent updates at 1731736060255Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736060255Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731736060256 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731736060256Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736060256Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736060272 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736060272Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736060284 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736060298 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736060298Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736060312 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736060331 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736060332 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736060346 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736060360 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736060360Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@595a5292: reopening flushed file at 1731736060374 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6749b27e: reopening flushed file at 1731736060384 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f1c86ec: reopening flushed file at 1731736060394 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@634f0136: reopening flushed file at 1731736060402 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=59, compaction requested=false at 1731736060412 (+10 ms)Writing region close event to WAL at 1731736060414 (+2 ms)Closed at 1731736060414 2024-11-16T05:47:40,415 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,415 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,415 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,415 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39803 is added to blk_1073741830_1006 (size=27973) 2024-11-16T05:47:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37001 is added to blk_1073741830_1006 (size=27973) 2024-11-16T05:47:40,419 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T05:47:40,419 INFO [M:0;3456ee6a3164:43225 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T05:47:40,419 INFO [M:0;3456ee6a3164:43225 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43225 2024-11-16T05:47:40,419 INFO [M:0;3456ee6a3164:43225 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:47:40,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:40,521 INFO [M:0;3456ee6a3164:43225 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:47:40,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43225-0x100471121b10000, quorum=127.0.0.1:55961, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:40,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e73d4de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:40,533 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48024e5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:40,534 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:40,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49288de2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:40,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@123edf60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:40,538 WARN [BP-1268815547-172.17.0.2-1731735960579 heartbeating to localhost/127.0.0.1:42921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:47:40,538 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:47:40,538 WARN [BP-1268815547-172.17.0.2-1731735960579 heartbeating to localhost/127.0.0.1:42921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1268815547-172.17.0.2-1731735960579 (Datanode Uuid 06f41a7e-0c3b-452e-9516-3d78325f0927) service to localhost/127.0.0.1:42921 2024-11-16T05:47:40,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:47:40,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data3/current/BP-1268815547-172.17.0.2-1731735960579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:40,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data4/current/BP-1268815547-172.17.0.2-1731735960579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:40,540 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:47:40,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@705aed2e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:40,542 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f582047{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:40,542 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:40,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1edc186c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:40,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@522d97dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:40,544 WARN [BP-1268815547-172.17.0.2-1731735960579 heartbeating to localhost/127.0.0.1:42921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:47:40,544 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:47:40,544 WARN [BP-1268815547-172.17.0.2-1731735960579 heartbeating to localhost/127.0.0.1:42921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1268815547-172.17.0.2-1731735960579 (Datanode Uuid 3b4185ea-1c03-4660-bd43-40bedfae8a20) service to localhost/127.0.0.1:42921 2024-11-16T05:47:40,544 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:47:40,544 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data1/current/BP-1268815547-172.17.0.2-1731735960579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:40,545 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/cluster_36beb1b6-185f-17c2-cec2-b567e1118dec/data/data2/current/BP-1268815547-172.17.0.2-1731735960579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:40,545 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:47:40,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:47:40,559 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:40,559 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:40,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:40,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:40,567 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:47:40,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:47:40,605 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/3456ee6a3164:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
regionserver/3456ee6a3164:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:42921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:42921 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:42921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@609afd55 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=145 (was 336), ProcessCount=11 (was 11), AvailableMemoryMB=3806 (was 4327) 2024-11-16T05:47:40,611 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=145, ProcessCount=11, AvailableMemoryMB=3805 2024-11-16T05:47:40,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.log.dir so I do NOT create it in target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/481a1ad9-e931-ad2d-20dd-2d27ca1f99f0/hadoop.tmp.dir so I do NOT create it in target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9, deleteOnExit=true 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/test.cache.data in system properties and HBase conf 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:47:40,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:47:40,613 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-16T05:47:40,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:47:40,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:47:40,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:47:40,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:47:40,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:47:40,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:47:40,628 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:47:40,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:40,696 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:40,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:40,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:40,697 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:47:40,698 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:40,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44938570{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:40,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@591c1a6f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:40,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54c711e7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/java.io.tmpdir/jetty-localhost-35269-hadoop-hdfs-3_4_1-tests_jar-_-any-2363676648345230596/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:47:40,796 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@124e4130{HTTP/1.1, (http/1.1)}{localhost:35269} 2024-11-16T05:47:40,796 INFO [Time-limited test {}] server.Server(415): Started @102494ms 2024-11-16T05:47:40,808 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:47:40,864 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:40,868 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:40,868 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:40,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:40,869 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:47:40,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4935312d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:40,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a2cf006{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:40,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bbcfb57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/java.io.tmpdir/jetty-localhost-41027-hadoop-hdfs-3_4_1-tests_jar-_-any-3130188593194864995/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:40,970 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3590efb4{HTTP/1.1, (http/1.1)}{localhost:41027} 2024-11-16T05:47:40,970 INFO [Time-limited test {}] server.Server(415): Started @102668ms 2024-11-16T05:47:40,972 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:47:41,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:41,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:41,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:41,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:41,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:47:41,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c182ba3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:41,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463767b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:41,044 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data1/current/BP-1161465139-172.17.0.2-1731736060639/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:41,044 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data2/current/BP-1161465139-172.17.0.2-1731736060639/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:41,059 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:47:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x909e77b1d9091820 with lease ID 0xd7c9d5277f40f345: Processing first storage report for DS-f90e2502-b0e2-4a3f-bc37-4c50273eb955 from datanode DatanodeRegistration(127.0.0.1:44989, datanodeUuid=43bb0412-df79-4383-805a-8ad836aa519b, infoPort=39937, infoSecurePort=0, ipcPort=39991, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639) 2024-11-16T05:47:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x909e77b1d9091820 with lease ID 0xd7c9d5277f40f345: from storage DS-f90e2502-b0e2-4a3f-bc37-4c50273eb955 node DatanodeRegistration(127.0.0.1:44989, datanodeUuid=43bb0412-df79-4383-805a-8ad836aa519b, infoPort=39937, infoSecurePort=0, ipcPort=39991, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x909e77b1d9091820 with lease ID 0xd7c9d5277f40f345: Processing first storage report for DS-026bb2a7-f412-4557-8638-04ca343796de from datanode DatanodeRegistration(127.0.0.1:44989, datanodeUuid=43bb0412-df79-4383-805a-8ad836aa519b, infoPort=39937, infoSecurePort=0, ipcPort=39991, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639) 2024-11-16T05:47:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x909e77b1d9091820 with lease ID 0xd7c9d5277f40f345: from storage DS-026bb2a7-f412-4557-8638-04ca343796de node DatanodeRegistration(127.0.0.1:44989, datanodeUuid=43bb0412-df79-4383-805a-8ad836aa519b, infoPort=39937, infoSecurePort=0, ipcPort=39991, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:41,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a9a3682{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/java.io.tmpdir/jetty-localhost-40721-hadoop-hdfs-3_4_1-tests_jar-_-any-1869147511920752171/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:41,116 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b589a7d{HTTP/1.1, (http/1.1)}{localhost:40721} 2024-11-16T05:47:41,116 INFO [Time-limited test {}] server.Server(415): Started @102814ms 2024-11-16T05:47:41,117 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
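[Editor's note] The block above is HBaseTestingUtil pointing every filesystem-related key (yarn.*, dfs.*, nfs.dump.dir, java.io.tmpdir, fs.s3a.committer.staging.tmp.path) at the per-run test-data directory and then starting the mini HDFS cluster: a NameNode web UI on embedded Jetty followed by two DataNodes, whose first block reports the BlockManager processes. For orientation, here is a minimal sketch of the kind of test-side call that produces this startup sequence; it uses the public HBaseTestingUtil and StartTestingClusterOption API that the log names, but the class name and option values below are illustrative assumptions, not code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartTestingClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // getConfiguration() returns the conf whose yarn.*, dfs.* and tmp-dir keys
        // are rewritten under the test-data directory, as logged above.
        Configuration conf = util.getConfiguration();

        // One master, one region server, two datanodes: the same process set whose
        // startup this log records (the counts here are assumptions).
        StartTestingClusterOption option = StartTestingClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise the cluster through util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }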
2024-11-16T05:47:41,180 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data4/current/BP-1161465139-172.17.0.2-1731736060639/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:41,180 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data3/current/BP-1161465139-172.17.0.2-1731736060639/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:41,205 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:47:41,208 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe89a358df16a5d97 with lease ID 0xd7c9d5277f40f346: Processing first storage report for DS-0b08252a-3a02-48ca-b8c0-9beab721d524 from datanode DatanodeRegistration(127.0.0.1:41539, datanodeUuid=198c97ca-fa04-4816-9570-6c849ba6a672, infoPort=46523, infoSecurePort=0, ipcPort=42337, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639) 2024-11-16T05:47:41,208 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe89a358df16a5d97 with lease ID 0xd7c9d5277f40f346: from storage DS-0b08252a-3a02-48ca-b8c0-9beab721d524 node DatanodeRegistration(127.0.0.1:41539, datanodeUuid=198c97ca-fa04-4816-9570-6c849ba6a672, infoPort=46523, infoSecurePort=0, ipcPort=42337, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:41,208 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe89a358df16a5d97 with lease ID 0xd7c9d5277f40f346: Processing first storage report for DS-d4d131d6-6441-42a5-9e26-91dafd1b1b26 from datanode DatanodeRegistration(127.0.0.1:41539, datanodeUuid=198c97ca-fa04-4816-9570-6c849ba6a672, infoPort=46523, infoSecurePort=0, ipcPort=42337, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639) 2024-11-16T05:47:41,208 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe89a358df16a5d97 with lease ID 0xd7c9d5277f40f346: from storage DS-d4d131d6-6441-42a5-9e26-91dafd1b1b26 node DatanodeRegistration(127.0.0.1:41539, datanodeUuid=198c97ca-fa04-4816-9570-6c849ba6a672, infoPort=46523, infoSecurePort=0, ipcPort=42337, storageInfo=lv=-57;cid=testClusterID;nsid=1939942979;c=1731736060639), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:41,247 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962 2024-11-16T05:47:41,250 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/zookeeper_0, clientPort=64594, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:47:41,251 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64594 2024-11-16T05:47:41,251 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,253 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:47:41,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:47:41,264 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f with version=8 2024-11-16T05:47:41,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:47:41,266 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:47:41,266 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:47:41,267 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43181 2024-11-16T05:47:41,269 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43181 connecting to ZooKeeper ensemble=127.0.0.1:64594 2024-11-16T05:47:41,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:431810x0, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:47:41,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43181-0x1004712a1b00000 connected 2024-11-16T05:47:41,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,296 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:41,296 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f, hbase.cluster.distributed=false 2024-11-16T05:47:41,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:47:41,298 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43181 2024-11-16T05:47:41,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43181 2024-11-16T05:47:41,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43181 2024-11-16T05:47:41,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43181 2024-11-16T05:47:41,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43181 2024-11-16T05:47:41,321 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:47:41,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:41,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:41,321 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:47:41,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:41,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:47:41,322 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:47:41,322 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:47:41,323 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41903 2024-11-16T05:47:41,324 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41903 connecting to ZooKeeper ensemble=127.0.0.1:64594 2024-11-16T05:47:41,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419030x0, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:47:41,332 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419030x0, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:41,332 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41903-0x1004712a1b00001 connected 2024-11-16T05:47:41,332 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:47:41,335 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:47:41,336 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:47:41,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:47:41,339 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41903 2024-11-16T05:47:41,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41903 2024-11-16T05:47:41,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41903 2024-11-16T05:47:41,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41903 2024-11-16T05:47:41,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41903 2024-11-16T05:47:41,359 
DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:43181 2024-11-16T05:47:41,359 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:41,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:41,362 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:47:41,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,363 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:47:41,364 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,43181,1731736061265 from backup master directory 2024-11-16T05:47:41,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:41,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,365 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
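[Editor's note] Between the two NettyRpcServer binds above, the master (port 43181) and the region server (port 41903) each build the same executor set -- default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo and metaPriority.FPBQ.Fifo -- with 3 handlers per pool and a call-queue length of 30 (3 handlers times the default of 10 queued calls per handler). Those sizes are governed by the standard handler-count keys; the sketch below shows the keys that would typically produce them, with the concrete values treated as assumptions since the test's own configuration files are not part of this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerConfigSketch {
      // Assumed configuration that would yield the small executor sizes logged above;
      // the property names are standard HBase keys, the values are illustrative.
      static Configuration smallRpcConfig() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);              // default.FPBQ.Fifo handlers
        conf.setInt("hbase.regionserver.metahandler.count", 3);          // priority.RWQ.Fifo handlers
        conf.setInt("hbase.regionserver.replication.handler.count", 3);  // replication.FPBQ.Fifo handlers
        // maxQueueLength=30 in the log is derived, not set directly:
        // handlerCount (3) * the default max queued calls per handler (10).
        return conf;
      }
    }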
2024-11-16T05:47:41,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:41,365 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,370 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/hbase.id] with ID: d024b467-23a3-4a44-8bc5-d01265951a5e 2024-11-16T05:47:41,370 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/.tmp/hbase.id 2024-11-16T05:47:41,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:47:41,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:47:41,377 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/.tmp/hbase.id]:[hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/hbase.id] 2024-11-16T05:47:41,391 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:41,391 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T05:47:41,393 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
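[Editor's note] At this point the master has won the (trivial) election, registered itself as 3456ee6a3164,43181,1731736061265 and persisted cluster ID d024b467-23a3-4a44-8bc5-d01265951a5e to hbase.id. A client reaches this cluster purely through the ZooKeeper ensemble logged earlier (127.0.0.1:64594); the snippet below is an assumed illustration of such a connection, not part of the test, and should report the same cluster ID that was just written.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and client port are the ones logged for this run's MiniZooKeeperCluster.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 64594);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Resolves the active master via ZooKeeper/ClientMetaService and prints
          // the cluster ID recorded in hbase.id.
          System.out.println(admin.getClusterMetrics().getClusterId());
        }
      }
    }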
2024-11-16T05:47:41,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:47:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:47:41,406 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:47:41,407 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:47:41,407 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:41,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:47:41,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:47:41,420 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store 2024-11-16T05:47:41,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:47:41,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:47:41,428 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:41,428 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:47:41,428 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:41,428 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:41,428 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:47:41,429 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:41,429 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
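[Editor's note] The descriptor dumped above is the master's local 'master:store' region: an in-memory 'info' family (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks) plus 'proc', 'rs' and 'state' families on defaults. As a reference point, a descriptor of that shape can be assembled with the public builder API roughly as follows; this is a sketch of the equivalent construction, not the code MasterRegion itself executes.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // Builds a table descriptor mirroring the attributes read off the log above.
      static TableDescriptor masterStoreLike() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' keep the family defaults (1 version, 64 KB blocks, ROW bloom).
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }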
2024-11-16T05:47:41,429 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736061428Disabling compacts and flushes for region at 1731736061428Disabling writes for close at 1731736061428Writing region close event to WAL at 1731736061429 (+1 ms)Closed at 1731736061429 2024-11-16T05:47:41,430 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/.initializing 2024-11-16T05:47:41,430 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/WALs/3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,433 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C43181%2C1731736061265, suffix=, logDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/WALs/3456ee6a3164,43181,1731736061265, archiveDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/oldWALs, maxLogs=10 2024-11-16T05:47:41,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C43181%2C1731736061265.1731736061433 2024-11-16T05:47:41,439 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/WALs/3456ee6a3164,43181,1731736061265/3456ee6a3164%2C43181%2C1731736061265.1731736061433 2024-11-16T05:47:41,441 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39937:39937),(127.0.0.1/127.0.0.1:46523:46523)] 2024-11-16T05:47:41,445 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:47:41,445 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:41,445 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,445 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:47:41,449 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:41,450 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:47:41,451 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:41,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:47:41,454 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:41,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:47:41,457 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:41,457 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,458 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,458 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,460 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,460 DEBUG [master/3456ee6a3164:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,461 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T05:47:41,462 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:41,464 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:47:41,465 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798603, jitterRate=0.015476509928703308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:47:41,466 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731736061446Initializing all the Stores at 1731736061447 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736061447Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736061447Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736061447Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736061447Cleaning up temporary data from old regions at 1731736061460 (+13 ms)Region opened successfully at 1731736061466 (+6 ms) 2024-11-16T05:47:41,467 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:47:41,471 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a8cab34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:47:41,472 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T05:47:41,472 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:47:41,472 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:47:41,472 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:47:41,473 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T05:47:41,473 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T05:47:41,474 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:47:41,476 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:47:41,477 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:47:41,478 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:47:41,479 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:47:41,479 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:47:41,480 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:47:41,481 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:47:41,482 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:47:41,485 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:47:41,486 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:47:41,487 DEBUG 
[master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:47:41,489 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:47:41,490 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:47:41,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:41,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:41,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,491 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,43181,1731736061265, sessionid=0x1004712a1b00000, setting cluster-up flag (Was=false) 2024-11-16T05:47:41,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,497 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:47:41,498 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,505 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T05:47:41,506 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,43181,1731736061265 2024-11-16T05:47:41,508 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:47:41,510 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:41,510 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:47:41,510 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T05:47:41,510 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,43181,1731736061265 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:47:41,512 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:41,512 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:41,512 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:41,513 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:41,513 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:47:41,513 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,513 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:47:41,513 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:47:41,514 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731736091514 2024-11-16T05:47:41,514 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:47:41,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:47:41,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:47:41,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:47:41,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:47:41,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T05:47:41,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,516 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:47:41,516 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:47:41,516 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:41,516 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:47:41,516 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:47:41,516 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:47:41,517 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:47:41,517 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736061517,5,FailOnTimeoutGroup] 2024-11-16T05:47:41,517 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736061517,5,FailOnTimeoutGroup] 2024-11-16T05:47:41,517 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,517 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:47:41,517 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,517 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,518 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,518 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:47:41,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:47:41,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:47:41,527 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:47:41,528 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f 2024-11-16T05:47:41,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:47:41,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:47:41,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:41,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:47:41,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:47:41,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:41,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:47:41,544 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(746): ClusterId : d024b467-23a3-4a44-8bc5-d01265951a5e 2024-11-16T05:47:41,544 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:47:41,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:47:41,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:41,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:47:41,546 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:47:41,546 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:47:41,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:47:41,547 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:41,548 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure online-snapshot initialized 2024-11-16T05:47:41,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:47:41,549 DEBUG [RS:0;3456ee6a3164:41903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e448cca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:47:41,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:47:41,550 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:41,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:41,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:47:41,551 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740 2024-11-16T05:47:41,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740 2024-11-16T05:47:41,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:47:41,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:47:41,554 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
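
The FlushLargeStoresPolicy entry above falls back to memStoreFlushSize divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the hbase:meta table descriptor. A minimal sketch, assuming the stock TableDescriptorBuilder / ColumnFamilyDescriptorBuilder client API, of how an ordinary table could pin that bound explicitly; the table name, family name, and 16 MB value are illustrative only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class FlushLowerBoundSketch {
  public static TableDescriptor descriptor() {
    // Hypothetical table; the property name is the one reported in the log line above.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // With this set, FlushLargeStoresPolicy flushes an individual store once it
        // holds roughly 16 MB instead of using memStoreFlushSize / number-of-families.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}
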
2024-11-16T05:47:41,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:47:41,557 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:47:41,558 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700003, jitterRate=-0.10990025103092194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:47:41,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731736061538Initializing all the Stores at 1731736061539 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736061539Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736061540 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736061540Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736061540Cleaning up temporary data from old regions at 1731736061553 (+13 ms)Region opened successfully at 1731736061559 (+6 ms) 2024-11-16T05:47:41,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:47:41,559 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:47:41,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:47:41,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:47:41,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:47:41,559 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:47:41,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736061559Disabling compacts and flushes for region at 1731736061559Disabling writes for close at 1731736061559Writing 
region close event to WAL at 1731736061559Closed at 1731736061559 2024-11-16T05:47:41,561 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:41,561 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T05:47:41,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:47:41,563 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:47:41,565 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:47:41,567 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:41903 2024-11-16T05:47:41,567 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:47:41,567 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:47:41,567 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T05:47:41,568 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,43181,1731736061265 with port=41903, startcode=1731736061321 2024-11-16T05:47:41,568 DEBUG [RS:0;3456ee6a3164:41903 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:47:41,571 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33693, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:47:41,571 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43181 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,571 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43181 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,573 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f 2024-11-16T05:47:41,574 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46059 2024-11-16T05:47:41,574 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:47:41,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:47:41,576 DEBUG [RS:0;3456ee6a3164:41903 
{}] zookeeper.ZKUtil(111): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,576 WARN [RS:0;3456ee6a3164:41903 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T05:47:41,576 INFO [RS:0;3456ee6a3164:41903 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:41,576 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/WALs/3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,576 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,41903,1731736061321] 2024-11-16T05:47:41,584 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:47:41,587 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:47:41,589 INFO [RS:0;3456ee6a3164:41903 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:47:41,589 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,590 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:47:41,591 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:47:41,591 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
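
The RegionServerTracker entry above fires when the region server's ephemeral znode appears under /hbase/rs. A small sketch of registering the same kind of children watch with the plain Apache ZooKeeper client rather than HBase's ZKWatcher; the quorum address is the one printed in this log and is otherwise arbitrary.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class RsWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64594", 30000, (WatchedEvent e) -> {
      // The master reacts to events like the NodeChildrenChanged on /hbase/rs
      // seen above when a region server's ephemeral node is created.
      System.out.println("event: " + e.getType() + " path=" + e.getPath());
    });
    // Registering the watch and listing the currently live region server znodes.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    servers.forEach(System.out::println);
    zk.close();
  }
}
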
2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,591 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,592 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,592 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,592 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,592 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:41,592 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:47:41,592 DEBUG [RS:0;3456ee6a3164:41903 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:47:41,592 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,592 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,593 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,593 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
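
Each ExecutorService entry above names a pool plus its corePoolSize/maxPoolSize. A rough analog in plain java.util.concurrent (not HBase's executor.ExecutorService class), sized like the RS_OPEN_REGION pool from the log:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSketch {
  public static ThreadPoolExecutor pool(String name, int core, int max) {
    // Unbounded queue, so the max size only matters when core < max and the queue is bounded;
    // here core == max, matching the corePoolSize=1, maxPoolSize=1 entries above.
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        core, max, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + System.nanoTime()));
    // Let idle core threads exit, as a long-lived but mostly idle pool would.
    executor.allowCoreThreadTimeOut(true);
    return executor;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = pool("RS_OPEN_REGION", 1, 1);
    openRegion.execute(() -> System.out.println("open-region task"));
    openRegion.shutdown();
  }
}
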
2024-11-16T05:47:41,593 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,593 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,41903,1731736061321-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:47:41,610 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:47:41,610 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,41903,1731736061321-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,610 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,610 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.Replication(171): 3456ee6a3164,41903,1731736061321 started 2024-11-16T05:47:41,624 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:41,624 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,41903,1731736061321, RpcServer on 3456ee6a3164/172.17.0.2:41903, sessionid=0x1004712a1b00001 2024-11-16T05:47:41,624 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:47:41,624 DEBUG [RS:0;3456ee6a3164:41903 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,624 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,41903,1731736061321' 2024-11-16T05:47:41,624 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:47:41,625 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:47:41,626 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:47:41,626 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:47:41,626 DEBUG [RS:0;3456ee6a3164:41903 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,626 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,41903,1731736061321' 2024-11-16T05:47:41,626 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:47:41,627 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:47:41,627 DEBUG [RS:0;3456ee6a3164:41903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:47:41,627 INFO [RS:0;3456ee6a3164:41903 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:47:41,627 INFO [RS:0;3456ee6a3164:41903 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T05:47:41,716 WARN [3456ee6a3164:43181 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T05:47:41,730 INFO [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C41903%2C1731736061321, suffix=, logDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/WALs/3456ee6a3164,41903,1731736061321, archiveDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/oldWALs, maxLogs=32 2024-11-16T05:47:41,733 INFO [RS:0;3456ee6a3164:41903 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C41903%2C1731736061321.1731736061733 2024-11-16T05:47:41,743 INFO [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/WALs/3456ee6a3164,41903,1731736061321/3456ee6a3164%2C41903%2C1731736061321.1731736061733 2024-11-16T05:47:41,744 DEBUG [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39937:39937),(127.0.0.1/127.0.0.1:46523:46523)] 2024-11-16T05:47:41,966 DEBUG [3456ee6a3164:43181 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:47:41,967 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,41903,1731736061321 2024-11-16T05:47:41,972 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,41903,1731736061321, state=OPENING 2024-11-16T05:47:41,975 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:47:41,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:41,979 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:47:41,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:41,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:41,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,41903,1731736061321}] 2024-11-16T05:47:42,135 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:47:42,140 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34343, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:47:42,150 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:47:42,150 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:42,152 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C41903%2C1731736061321.meta, suffix=.meta, logDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/WALs/3456ee6a3164,41903,1731736061321, archiveDir=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/oldWALs, maxLogs=32 2024-11-16T05:47:42,155 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C41903%2C1731736061321.meta.1731736062155.meta 2024-11-16T05:47:42,161 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/WALs/3456ee6a3164,41903,1731736061321/3456ee6a3164%2C41903%2C1731736061321.meta.1731736062155.meta 2024-11-16T05:47:42,169 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46523:46523),(127.0.0.1/127.0.0.1:39937:39937)] 2024-11-16T05:47:42,170 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:47:42,171 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:47:42,171 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:47:42,171 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
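
The entries above show hbase:meta carrying org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint as coprocessor$1 at priority 536870911, loaded from the table descriptor at region open. As a sketch only, assuming the standard TableDescriptorBuilder API, attaching the same endpoint class to a hypothetical user table could look like this:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CoprocessorAttachSketch {
  public static TableDescriptor withMultiRowMutation() throws IOException {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo")) // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Registers the endpoint in the descriptor; the region server loads it at
        // open time, as the RegionCoprocessorHost lines above show for hbase:meta.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
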
2024-11-16T05:47:42,171 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:47:42,171 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:42,171 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:47:42,171 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:47:42,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:47:42,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:47:42,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:42,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:42,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:47:42,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:47:42,176 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:42,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:42,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:47:42,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:47:42,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:42,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:42,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:47:42,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:47:42,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:42,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
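
The store openers above repeat the same family attributes for hbase:meta (ROWCOL bloom filter, in-memory, ROW_INDEX_V1 data block encoding, 8 KB blocks for info/ns/table). A sketch, assuming the standard ColumnFamilyDescriptorBuilder API, of declaring a family with those attributes on an ordinary table:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaLikeFamilySketch {
  public static ColumnFamilyDescriptor infoLike() {
    // Values copied from the descriptor printed in the log for the 'info' family.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();
  }
}
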
2024-11-16T05:47:42,180 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:47:42,181 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740 2024-11-16T05:47:42,183 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740 2024-11-16T05:47:42,184 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:47:42,184 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:47:42,185 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:47:42,186 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:47:42,187 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798401, jitterRate=0.01521967351436615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:47:42,187 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:47:42,188 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731736062172Writing region info on filesystem at 1731736062172Initializing all the Stores at 1731736062173 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736062173Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736062173Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736062173Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736062173Cleaning up temporary data from old regions at 1731736062184 (+11 ms)Running coprocessor post-open hooks at 1731736062187 (+3 ms)Region opened successfully at 1731736062188 (+1 ms) 2024-11-16T05:47:42,189 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731736062134 2024-11-16T05:47:42,192 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:47:42,192 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:47:42,193 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,41903,1731736061321 2024-11-16T05:47:42,194 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,41903,1731736061321, state=OPEN 2024-11-16T05:47:42,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:47:42,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:47:42,198 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,41903,1731736061321 2024-11-16T05:47:42,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:42,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:42,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:47:42,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,41903,1731736061321 in 219 msec 2024-11-16T05:47:42,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:47:42,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 641 msec 2024-11-16T05:47:42,205 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:42,205 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:47:42,207 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:47:42,207 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,41903,1731736061321, seqNum=-1] 2024-11-16T05:47:42,208 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:47:42,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53063, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:47:42,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 706 msec 2024-11-16T05:47:42,216 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731736062216, completionTime=-1 2024-11-16T05:47:42,217 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:47:42,217 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:47:42,219 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:47:42,219 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736122219 2024-11-16T05:47:42,219 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736182219 2024-11-16T05:47:42,219 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-16T05:47:42,220 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43181,1731736061265-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:42,220 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43181,1731736061265-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:42,220 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43181,1731736061265-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:42,220 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:43181, period=300000, unit=MILLISECONDS is enabled. 
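
The ConnectionUtils entries above fetch the hbase:meta location from the connection registry and get back [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,41903,1731736061321, seqNum=-1]. The same lookup through the public client API, as a sketch; the quorum host and client port are taken from this log and any row key works.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // from the log
    conf.set("hbase.zookeeper.property.clientPort", "64594");   // from the log
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Which server currently hosts the meta region covering this key?
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("anyRow"));
      System.out.println(location.getServerName() + " seqNum=" + location.getSeqNum());
    }
  }
}
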
2024-11-16T05:47:42,220 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:42,220 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:42,222 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.861sec 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43181,1731736061265-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:47:42,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43181,1731736061265-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:47:42,229 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:47:42,230 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:47:42,230 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,43181,1731736061265-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T05:47:42,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21b0614b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:47:42,244 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,43181,-1 for getting cluster id 2024-11-16T05:47:42,244 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:47:42,246 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd024b467-23a3-4a44-8bc5-d01265951a5e' 2024-11-16T05:47:42,247 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:47:42,247 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d024b467-23a3-4a44-8bc5-d01265951a5e" 2024-11-16T05:47:42,247 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e80149a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:47:42,247 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,43181,-1] 2024-11-16T05:47:42,248 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T05:47:42,248 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:42,250 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:47:42,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542ea236, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:47:42,251 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:47:42,252 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,41903,1731736061321, seqNum=-1] 2024-11-16T05:47:42,253 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:47:42,254 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50998, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:47:42,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,43181,1731736061265 2024-11-16T05:47:42,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:42,260 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:47:42,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:47:42,260 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:47:42,261 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:47:42,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:42,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:42,261 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T05:47:42,261 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:47:42,261 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1365984571, stopped=false 2024-11-16T05:47:42,261 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,43181,1731736061265 2024-11-16T05:47:42,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:42,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:42,263 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:47:42,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:42,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:42,263 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T05:47:42,263 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:47:42,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:42,263 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:42,263 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,41903,1731736061321' ***** 2024-11-16T05:47:42,263 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:47:42,263 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:47:42,264 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,41903,1731736061321 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:41903. 2024-11-16T05:47:42,264 DEBUG [RS:0;3456ee6a3164:41903 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:47:42,264 DEBUG [RS:0;3456ee6a3164:41903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
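The shutdown stack traces above show the teardown path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the client connections and stops the master and region server, producing the "STOPPING region server" and "STOPPING master" messages that follow. A minimal sketch of that lifecycle, assuming a JUnit 4 test that owns a single HBaseTestingUtil instance (the class and test names below are illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      // One utility per test; it owns the mini ZooKeeper, mini DFS and HBase cluster.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts mini ZK, mini DFS and a single master plus region server by default.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Closes connections, stops the region server and master, then DFS and ZK --
        // the "Shutting down minicluster" sequence recorded above.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void clusterComesUp() {
        // If startMiniCluster() returned, the cluster is up; a real test would
        // exercise tables, WAL rolling, etc. here.
      }
    }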
2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T05:47:42,264 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:47:42,265 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T05:47:42,265 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T05:47:42,265 DEBUG [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T05:47:42,265 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:47:42,265 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:47:42,265 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:47:42,265 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:47:42,265 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:47:42,265 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T05:47:42,284 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/.tmp/ns/c17b31f95c69485cbd7a34232842ff6d is 43, key is default/ns:d/1731736062210/Put/seqid=0 2024-11-16T05:47:42,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741835_1011 (size=5153) 2024-11-16T05:47:42,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741835_1011 (size=5153) 2024-11-16T05:47:42,290 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/.tmp/ns/c17b31f95c69485cbd7a34232842ff6d 2024-11-16T05:47:42,299 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/.tmp/ns/c17b31f95c69485cbd7a34232842ff6d as hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/ns/c17b31f95c69485cbd7a34232842ff6d 2024-11-16T05:47:42,307 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/ns/c17b31f95c69485cbd7a34232842ff6d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T05:47:42,308 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-11-16T05:47:42,308 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T05:47:42,314 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T05:47:42,315 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:47:42,315 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:47:42,315 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736062265Running coprocessor pre-close hooks at 1731736062265Disabling compacts and flushes for region at 1731736062265Disabling writes for close at 1731736062265Obtaining lock to block concurrent updates at 1731736062265Preparing flush snapshotting stores in 1588230740 at 1731736062265Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731736062266 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731736062266Flushing 1588230740/ns: creating writer at 1731736062266Flushing 1588230740/ns: appending metadata at 1731736062283 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731736062283Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a9c0e00: reopening flushed file at 1731736062297 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1731736062308 (+11 ms)Writing region close event to WAL at 1731736062310 (+2 ms)Running coprocessor post-close hooks at 1731736062315 (+5 ms)Closed at 1731736062315 2024-11-16T05:47:42,316 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:47:42,465 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,41903,1731736061321; all regions closed. 
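Before hbase:meta is closed above, its memstore is flushed: data is written to a temporary file under .tmp, committed into the ns column family directory, and each step is recorded in the close journal. A client can force the same kind of flush on an ordinary table through the Admin API; a short sketch under the assumption of a reachable cluster (the table name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Writes current memstore contents out as HFiles, the same operation the
          // region performs automatically during the close sequence logged above.
          admin.flush(TableName.valueOf("my_table")); // illustrative table name
        }
      }
    }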
2024-11-16T05:47:42,466 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,466 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,466 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,466 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,466 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741834_1010 (size=1152) 2024-11-16T05:47:42,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741834_1010 (size=1152) 2024-11-16T05:47:42,471 DEBUG [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/oldWALs 2024-11-16T05:47:42,471 INFO [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C41903%2C1731736061321.meta:.meta(num 1731736062155) 2024-11-16T05:47:42,471 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,471 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,472 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,472 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,472 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741833_1009 (size=93) 2024-11-16T05:47:42,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741833_1009 (size=93) 2024-11-16T05:47:42,476 DEBUG [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/oldWALs 2024-11-16T05:47:42,476 INFO [RS:0;3456ee6a3164:41903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C41903%2C1731736061321:(num 1731736061733) 2024-11-16T05:47:42,476 DEBUG [RS:0;3456ee6a3164:41903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:42,476 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:47:42,476 INFO [RS:0;3456ee6a3164:41903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:47:42,476 INFO [RS:0;3456ee6a3164:41903 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T05:47:42,476 INFO [RS:0;3456ee6a3164:41903 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:47:42,476 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
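As the region server stops, its WAL writers are closed and the remaining WAL files are archived to the oldWALs directory. The surrounding test (TestLogRolling) exercises WAL rolling, and a roll can also be requested explicitly from a client. A hedged sketch, assuming a client version where Admin#getRegionServers and Admin#rollWALWriter are available:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask every region server to roll its write-ahead log; fully rolled files
          // are later archived to oldWALs, as seen in the log above.
          for (ServerName sn : admin.getRegionServers()) {
            admin.rollWALWriter(sn);
          }
        }
      }
    }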
2024-11-16T05:47:42,477 INFO [RS:0;3456ee6a3164:41903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41903 2024-11-16T05:47:42,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,41903,1731736061321 2024-11-16T05:47:42,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:47:42,480 INFO [RS:0;3456ee6a3164:41903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:47:42,482 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,41903,1731736061321] 2024-11-16T05:47:42,484 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,41903,1731736061321 already deleted, retry=false 2024-11-16T05:47:42,484 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,41903,1731736061321 expired; onlineServers=0 2024-11-16T05:47:42,484 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,43181,1731736061265' ***** 2024-11-16T05:47:42,484 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:47:42,484 INFO [M:0;3456ee6a3164:43181 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:47:42,484 INFO [M:0;3456ee6a3164:43181 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:47:42,484 DEBUG [M:0;3456ee6a3164:43181 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:47:42,484 DEBUG [M:0;3456ee6a3164:43181 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:47:42,484 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T05:47:42,484 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736061517 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736061517,5,FailOnTimeoutGroup] 2024-11-16T05:47:42,484 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736061517 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736061517,5,FailOnTimeoutGroup] 2024-11-16T05:47:42,484 INFO [M:0;3456ee6a3164:43181 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:47:42,485 INFO [M:0;3456ee6a3164:43181 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:47:42,485 DEBUG [M:0;3456ee6a3164:43181 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:47:42,485 INFO [M:0;3456ee6a3164:43181 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:47:42,485 INFO [M:0;3456ee6a3164:43181 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:47:42,485 INFO [M:0;3456ee6a3164:43181 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:47:42,485 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:47:42,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:47:42,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:42,486 DEBUG [M:0;3456ee6a3164:43181 {}] zookeeper.ZKUtil(347): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T05:47:42,486 WARN [M:0;3456ee6a3164:43181 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T05:47:42,487 INFO [M:0;3456ee6a3164:43181 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/.lastflushedseqids 2024-11-16T05:47:42,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741836_1012 (size=99) 2024-11-16T05:47:42,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741836_1012 (size=99) 2024-11-16T05:47:42,494 INFO [M:0;3456ee6a3164:43181 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:47:42,494 INFO [M:0;3456ee6a3164:43181 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:47:42,495 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:47:42,495 INFO [M:0;3456ee6a3164:43181 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:42,495 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:42,495 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:47:42,495 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:42,495 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T05:47:42,517 DEBUG [M:0;3456ee6a3164:43181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/390568ea7327437bb53d81a11d351e6f is 82, key is hbase:meta,,1/info:regioninfo/1731736062193/Put/seqid=0 2024-11-16T05:47:42,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741837_1013 (size=5672) 2024-11-16T05:47:42,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741837_1013 (size=5672) 2024-11-16T05:47:42,523 INFO [M:0;3456ee6a3164:43181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/390568ea7327437bb53d81a11d351e6f 2024-11-16T05:47:42,543 DEBUG [M:0;3456ee6a3164:43181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6e6a06fb42e44b26b67df07337c1d93a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731736062215/Put/seqid=0 2024-11-16T05:47:42,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741838_1014 (size=5275) 2024-11-16T05:47:42,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741838_1014 (size=5275) 2024-11-16T05:47:42,549 INFO [M:0;3456ee6a3164:43181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6e6a06fb42e44b26b67df07337c1d93a 2024-11-16T05:47:42,568 DEBUG [M:0;3456ee6a3164:43181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af59ab8d1bb34ad983c9d364a5b2c93f is 69, key is 3456ee6a3164,41903,1731736061321/rs:state/1731736061572/Put/seqid=0 2024-11-16T05:47:42,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741839_1015 (size=5156) 2024-11-16T05:47:42,574 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741839_1015 (size=5156) 2024-11-16T05:47:42,574 INFO [M:0;3456ee6a3164:43181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af59ab8d1bb34ad983c9d364a5b2c93f 2024-11-16T05:47:42,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:42,582 INFO [RS:0;3456ee6a3164:41903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:47:42,582 INFO [RS:0;3456ee6a3164:41903 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,41903,1731736061321; zookeeper connection closed. 2024-11-16T05:47:42,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41903-0x1004712a1b00001, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:42,582 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2533f619 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2533f619 2024-11-16T05:47:42,583 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T05:47:42,597 DEBUG [M:0;3456ee6a3164:43181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7902d9ab5d324e0facb078e26c1df68e is 52, key is load_balancer_on/state:d/1731736062259/Put/seqid=0 2024-11-16T05:47:42,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741840_1016 (size=5056) 2024-11-16T05:47:42,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741840_1016 (size=5056) 2024-11-16T05:47:42,603 INFO [M:0;3456ee6a3164:43181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7902d9ab5d324e0facb078e26c1df68e 2024-11-16T05:47:42,610 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/390568ea7327437bb53d81a11d351e6f as hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/390568ea7327437bb53d81a11d351e6f 2024-11-16T05:47:42,618 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/390568ea7327437bb53d81a11d351e6f, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T05:47:42,619 DEBUG [M:0;3456ee6a3164:43181 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6e6a06fb42e44b26b67df07337c1d93a as hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6e6a06fb42e44b26b67df07337c1d93a 2024-11-16T05:47:42,626 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6e6a06fb42e44b26b67df07337c1d93a, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T05:47:42,627 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af59ab8d1bb34ad983c9d364a5b2c93f as hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/af59ab8d1bb34ad983c9d364a5b2c93f 2024-11-16T05:47:42,633 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/af59ab8d1bb34ad983c9d364a5b2c93f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T05:47:42,635 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7902d9ab5d324e0facb078e26c1df68e as hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7902d9ab5d324e0facb078e26c1df68e 2024-11-16T05:47:42,642 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46059/user/jenkins/test-data/127498e9-dea0-a8ca-b1ee-86385792556f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7902d9ab5d324e0facb078e26c1df68e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T05:47:42,644 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false 2024-11-16T05:47:42,645 INFO [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:42,646 DEBUG [M:0;3456ee6a3164:43181 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736062494Disabling compacts and flushes for region at 1731736062494Disabling writes for close at 1731736062495 (+1 ms)Obtaining lock to block concurrent updates at 1731736062495Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736062495Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731736062496 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731736062496Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736062497 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736062517 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736062517Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736062529 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736062543 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736062543Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736062555 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736062568 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736062568Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736062580 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736062597 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736062597Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@670700f4: reopening flushed file at 1731736062609 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79c7f2b0: reopening flushed file at 1731736062618 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@312f18ce: reopening flushed file at 1731736062626 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@134d1f: reopening flushed file at 1731736062634 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false at 1731736062644 (+10 ms)Writing region close event to WAL at 1731736062645 (+1 ms)Closed at 1731736062645 2024-11-16T05:47:42,646 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,646 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,646 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,646 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:47:42,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41539 is added to blk_1073741830_1006 (size=10311) 2024-11-16T05:47:42,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44989 is added to blk_1073741830_1006 (size=10311) 2024-11-16T05:47:42,650 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T05:47:42,650 INFO [M:0;3456ee6a3164:43181 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T05:47:42,650 INFO [M:0;3456ee6a3164:43181 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43181 2024-11-16T05:47:42,650 INFO [M:0;3456ee6a3164:43181 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:47:42,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:42,752 INFO [M:0;3456ee6a3164:43181 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:47:42,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43181-0x1004712a1b00000, quorum=127.0.0.1:64594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:47:42,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a9a3682{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:42,759 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b589a7d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:42,759 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:42,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463767b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:42,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c182ba3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:42,762 WARN [BP-1161465139-172.17.0.2-1731736060639 heartbeating to localhost/127.0.0.1:46059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:47:42,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:47:42,762 WARN [BP-1161465139-172.17.0.2-1731736060639 heartbeating to localhost/127.0.0.1:46059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161465139-172.17.0.2-1731736060639 (Datanode Uuid 198c97ca-fa04-4816-9570-6c849ba6a672) service to localhost/127.0.0.1:46059 2024-11-16T05:47:42,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:47:42,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data3/current/BP-1161465139-172.17.0.2-1731736060639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:42,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data4/current/BP-1161465139-172.17.0.2-1731736060639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:42,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:47:42,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bbcfb57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:42,766 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3590efb4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:42,766 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:42,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a2cf006{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:42,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4935312d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:42,768 WARN [BP-1161465139-172.17.0.2-1731736060639 heartbeating to localhost/127.0.0.1:46059 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:47:42,768 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:47:42,768 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:47:42,768 WARN [BP-1161465139-172.17.0.2-1731736060639 heartbeating to localhost/127.0.0.1:46059 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161465139-172.17.0.2-1731736060639 (Datanode Uuid 43bb0412-df79-4383-805a-8ad836aa519b) service to localhost/127.0.0.1:46059 2024-11-16T05:47:42,769 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data1/current/BP-1161465139-172.17.0.2-1731736060639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:42,769 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/cluster_49bfe15b-541d-f195-abdd-dae0ce367ef9/data/data2/current/BP-1161465139-172.17.0.2-1731736060639 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:42,769 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:47:42,776 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54c711e7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:47:42,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@124e4130{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:42,777 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:42,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@591c1a6f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:42,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44938570{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:42,784 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.log.dir so I do NOT create it in target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63 2024-11-16T05:47:42,807 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e077e1a0-b09a-3471-4fa4-77cb63efe962/hadoop.tmp.dir so I do NOT create it in target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d, deleteOnExit=true 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/test.cache.data in system properties and HBase conf 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:47:42,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:47:42,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:47:42,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:47:42,808 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T05:47:42,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:47:42,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:47:42,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:47:42,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:47:42,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:47:42,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:47:42,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:47:42,821 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:47:42,872 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:42,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:42,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:42,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:42,881 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:47:42,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:42,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4799fc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:42,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@302502f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:42,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b2c714b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-40253-hadoop-hdfs-3_4_1-tests_jar-_-any-15384866535947105726/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:47:42,977 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78100011{HTTP/1.1, (http/1.1)}{localhost:40253} 2024-11-16T05:47:42,977 INFO [Time-limited test {}] server.Server(415): Started @104675ms 2024-11-16T05:47:42,988 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:47:43,038 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:43,042 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:43,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:43,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:43,043 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:47:43,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c43bbf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:43,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a95d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:43,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a34f554{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-33477-hadoop-hdfs-3_4_1-tests_jar-_-any-11752221932381584429/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:43,137 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@225c4391{HTTP/1.1, (http/1.1)}{localhost:33477} 2024-11-16T05:47:43,137 INFO [Time-limited test {}] server.Server(415): Started @104835ms 2024-11-16T05:47:43,139 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:47:43,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:43,170 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:43,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:43,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:43,171 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:47:43,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54c0bf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:43,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c5cbc59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:43,201 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data1/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:43,202 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data2/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:43,228 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:47:43,231 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20d2a2ce8eaa2661 with lease ID 0xbb7795547e792d77: Processing first storage report for DS-d44ada13-9000-43de-afa5-1afda4a2b5ad from datanode DatanodeRegistration(127.0.0.1:40457, datanodeUuid=ac5ff1c1-40f4-4466-8380-85ac8b35eb01, infoPort=41573, infoSecurePort=0, ipcPort=35993, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:43,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20d2a2ce8eaa2661 with lease ID 0xbb7795547e792d77: from storage DS-d44ada13-9000-43de-afa5-1afda4a2b5ad node DatanodeRegistration(127.0.0.1:40457, datanodeUuid=ac5ff1c1-40f4-4466-8380-85ac8b35eb01, infoPort=41573, infoSecurePort=0, ipcPort=35993, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:43,232 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20d2a2ce8eaa2661 with lease ID 0xbb7795547e792d77: Processing first storage report for DS-4531829e-2265-4bca-9a2d-883cb94f83e6 from datanode DatanodeRegistration(127.0.0.1:40457, datanodeUuid=ac5ff1c1-40f4-4466-8380-85ac8b35eb01, infoPort=41573, infoSecurePort=0, ipcPort=35993, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:43,232 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20d2a2ce8eaa2661 with lease ID 0xbb7795547e792d77: from storage DS-4531829e-2265-4bca-9a2d-883cb94f83e6 node DatanodeRegistration(127.0.0.1:40457, datanodeUuid=ac5ff1c1-40f4-4466-8380-85ac8b35eb01, infoPort=41573, infoSecurePort=0, ipcPort=35993, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:43,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f1aa8ab{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-36355-hadoop-hdfs-3_4_1-tests_jar-_-any-1327465640079083568/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:43,274 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@779704d3{HTTP/1.1, (http/1.1)}{localhost:36355} 2024-11-16T05:47:43,274 INFO [Time-limited test {}] server.Server(415): Started @104972ms 2024-11-16T05:47:43,275 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
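
The entries above record HBaseTestingUtil tearing down one minicluster and starting another with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, then bringing up the embedded HDFS DataNodes whose first block reports follow. Purely as an illustration of the test-side API that drives this startup/teardown cycle, here is a minimal sketch, assuming the HBase 3.x test utilities (HBaseTestingUtil and the StartMiniClusterOption builder) plus JUnit 4 are on the test classpath; the class name is invented for the example:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterSketch {
      // Owns the embedded HDFS, ZooKeeper and HBase processes seen in this log.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors the option values logged above: 1 master, 1 regionserver, 2 DataNodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        TEST_UTIL.startMiniCluster(option);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Triggers the shutdown sequence that ends with "Minicluster is down" in the log.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
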
2024-11-16T05:47:43,342 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data3/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:43,343 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data4/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:43,364 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:47:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe1d8a6288b8a4751 with lease ID 0xbb7795547e792d78: Processing first storage report for DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163 from datanode DatanodeRegistration(127.0.0.1:37035, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=39465, infoSecurePort=0, ipcPort=34121, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe1d8a6288b8a4751 with lease ID 0xbb7795547e792d78: from storage DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163 node DatanodeRegistration(127.0.0.1:37035, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=39465, infoSecurePort=0, ipcPort=34121, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe1d8a6288b8a4751 with lease ID 0xbb7795547e792d78: Processing first storage report for DS-e8eebe27-dffb-4fe4-93a5-398123953f20 from datanode DatanodeRegistration(127.0.0.1:37035, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=39465, infoSecurePort=0, ipcPort=34121, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe1d8a6288b8a4751 with lease ID 0xbb7795547e792d78: from storage DS-e8eebe27-dffb-4fe4-93a5-398123953f20 node DatanodeRegistration(127.0.0.1:37035, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=39465, infoSecurePort=0, ipcPort=34121, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:43,402 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63 2024-11-16T05:47:43,405 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/zookeeper_0, clientPort=50879, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:47:43,406 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50879 2024-11-16T05:47:43,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:47:43,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:47:43,420 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7 with version=8 2024-11-16T05:47:43,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:47:43,422 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:47:43,422 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:47:43,423 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44501 2024-11-16T05:47:43,424 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44501 connecting to ZooKeeper ensemble=127.0.0.1:50879 2024-11-16T05:47:43,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:445010x0, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:47:43,432 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44501-0x1004712aa1b0000 connected 2024-11-16T05:47:43,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,449 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:43,449 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7, hbase.cluster.distributed=false 2024-11-16T05:47:43,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:47:43,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44501 2024-11-16T05:47:43,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44501 2024-11-16T05:47:43,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44501 2024-11-16T05:47:43,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44501 2024-11-16T05:47:43,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44501 2024-11-16T05:47:43,466 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:47:43,467 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:47:43,469 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46863 2024-11-16T05:47:43,470 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46863 connecting to ZooKeeper ensemble=127.0.0.1:50879 2024-11-16T05:47:43,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,477 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468630x0, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:47:43,477 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468630x0, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:47:43,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46863-0x1004712aa1b0001 connected 2024-11-16T05:47:43,478 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:47:43,485 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:47:43,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:47:43,487 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:47:43,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46863 2024-11-16T05:47:43,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46863 2024-11-16T05:47:43,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46863 2024-11-16T05:47:43,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46863 2024-11-16T05:47:43,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46863 2024-11-16T05:47:43,504 
DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:44501 2024-11-16T05:47:43,504 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:43,505 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:43,506 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,507 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:47:43,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,507 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,507 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:47:43,508 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,44501,1731736063421 from backup master directory 2024-11-16T05:47:43,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:43,509 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:47:43,509 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
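
The RPC and ZooKeeper entries above show the master (port 44501) and regionserver (port 46863) each opening a RecoverableZooKeeper session against the mini ensemble at 127.0.0.1:50879 and watching znodes such as /hbase/master, /hbase/running and /hbase/backup-masters. As a rough illustration only, the following sketch uses the plain Apache ZooKeeper client to inspect those znodes while the cluster is up; the ensemble address and znode paths are copied from this particular run and change on every execution, and the class name is invented:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZNodeInspector {
      public static void main(String[] args) throws Exception {
        // Ensemble address as logged for this run only.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50879", 30000, event -> { });
        try {
          // The parent znode configured via zookeeper.znode.parent (default "/hbase").
          List<String> children = zk.getChildren("/hbase", false);
          System.out.println("children of /hbase: " + children);
          // /hbase/master holds the active master's location; /hbase/backup-masters its standbys.
          System.out.println("/hbase/master exists: " + (zk.exists("/hbase/master", false) != null));
          System.out.println("/hbase/backup-masters: " + zk.getChildren("/hbase/backup-masters", false));
        } finally {
          zk.close();
        }
      }
    }
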
2024-11-16T05:47:43,509 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,517 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/hbase.id] with ID: 4ce07069-b301-4bb6-8354-bff376268c5b 2024-11-16T05:47:43,517 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/.tmp/hbase.id 2024-11-16T05:47:43,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:47:43,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:47:43,527 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/.tmp/hbase.id]:[hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/hbase.id] 2024-11-16T05:47:43,541 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:43,542 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T05:47:43,544 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
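
The cluster ID entries above show the newly active master writing hbase.id to a .tmp location on HDFS and then moving it to its final path, i.e. the usual write-to-temporary-then-rename pattern for publishing small files so readers never see a half-written file. A generic sketch of that pattern with the stock Hadoop FileSystem API follows; the namenode URI and paths are placeholders (the run above used hdfs://localhost:36821 and a test-data directory), while the ID string is the one printed in the log:

    import java.io.IOException;
    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode URI; substitute the one for your cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
        Path tmp = new Path("/hbase/.tmp/hbase.id");
        Path dst = new Path("/hbase/hbase.id");

        // 1. Write the content to a temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("4ce07069-b301-4bb6-8354-bff376268c5b".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Publish it with a rename, the same move step the master logs above.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
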
2024-11-16T05:47:43,545 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:47:43,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:47:43,554 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:47:43,555 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:47:43,555 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:43,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:47:43,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:47:43,565 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store 2024-11-16T05:47:43,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:47:43,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:47:43,572 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:43,573 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:47:43,573 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:43,573 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:43,573 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:47:43,573 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:47:43,573 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
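
The MasterRegion entries above create the internal 'master:store' table with four column families (info, proc, rs, state), where only 'info' keeps three versions, lives in memory and uses a ROWCOL bloom filter, ROW_INDEX_V1 block encoding and an 8 KB block size. For readers who want to see how such a descriptor is expressed in code, here is a rough sketch using the public HBase client API; the table name is illustrative ('master:store' itself is a system table built internally, not through this path):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' mirrors the attributes printed in the log: 3 versions, in-memory,
        // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build());
        // proc, rs and state keep the defaults shown in the log:
        // 1 version, ROW bloom filter, no encoding, 64 KB blocks.
        for (String family : new String[] { "proc", "rs", "state" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build());
        }
        return builder.build();
      }
    }
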
2024-11-16T05:47:43,573 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736063573Disabling compacts and flushes for region at 1731736063573Disabling writes for close at 1731736063573Writing region close event to WAL at 1731736063573Closed at 1731736063573 2024-11-16T05:47:43,574 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/.initializing 2024-11-16T05:47:43,574 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,577 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C44501%2C1731736063421, suffix=, logDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421, archiveDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/oldWALs, maxLogs=10 2024-11-16T05:47:43,577 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C44501%2C1731736063421.1731736063577 2024-11-16T05:47:43,583 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 2024-11-16T05:47:43,584 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39465:39465),(127.0.0.1/127.0.0.1:41573:41573)] 2024-11-16T05:47:43,588 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:47:43,589 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:43,589 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,589 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:47:43,593 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:43,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:47:43,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:43,596 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:47:43,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:47:43,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:43,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:47:43,599 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:43,600 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,601 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,601 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,603 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping 
wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,603 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,603 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T05:47:43,604 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:47:43,609 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:47:43,610 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710887, jitterRate=-0.09606145322322845}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:47:43,611 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731736063589Initializing all the Stores at 1731736063590 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736063590Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736063591 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736063591Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736063591Cleaning up temporary data from old regions at 1731736063603 (+12 ms)Region opened successfully at 1731736063611 (+8 ms) 2024-11-16T05:47:43,612 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:47:43,616 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb3eb04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:47:43,617 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T05:47:43,617 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:47:43,617 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:47:43,617 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:47:43,618 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T05:47:43,618 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T05:47:43,618 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:47:43,621 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:47:43,622 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:47:43,623 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:47:43,623 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:47:43,624 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:47:43,624 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:47:43,625 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:47:43,626 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:47:43,626 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:47:43,627 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:47:43,628 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:47:43,630 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:47:43,631 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:47:43,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:43,632 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:47:43,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,632 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,632 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,44501,1731736063421, sessionid=0x1004712aa1b0000, setting cluster-up flag (Was=false) 2024-11-16T05:47:43,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,635 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,638 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:47:43,639 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,642 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:43,645 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-11-16T05:47:43,646 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,44501,1731736063421 2024-11-16T05:47:43,647 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:47:43,649 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:43,649 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:47:43,649 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T05:47:43,650 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,44501,1731736063421 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:47:43,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,655 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:43,655 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:47:43,656 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,656 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731736093661 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:47:43,662 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs 
cleaner threads 2024-11-16T05:47:43,663 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,664 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:47:43,664 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:47:43,664 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:47:43,664 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:47:43,664 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:47:43,664 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736063664,5,FailOnTimeoutGroup] 2024-11-16T05:47:43,665 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736063664,5,FailOnTimeoutGroup] 2024-11-16T05:47:43,665 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,665 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:47:43,665 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,665 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T05:47:43,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:47:43,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:47:43,673 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:47:43,673 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7 2024-11-16T05:47:43,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:47:43,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:47:43,684 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:43,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:47:43,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:47:43,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:43,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:47:43,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:47:43,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,694 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(746): ClusterId : 4ce07069-b301-4bb6-8354-bff376268c5b 2024-11-16T05:47:43,694 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:47:43,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:43,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:47:43,696 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:47:43,696 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:47:43,696 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:47:43,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:43,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:47:43,698 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T05:47:43,698 DEBUG [RS:0;3456ee6a3164:46863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e4b53c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:47:43,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:47:43,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:43,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:43,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:47:43,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740 2024-11-16T05:47:43,702 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740 2024-11-16T05:47:43,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:47:43,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:47:43,704 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:47:43,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:47:43,710 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:47:43,710 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822097, jitterRate=0.04535110294818878}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:47:43,711 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:46863 2024-11-16T05:47:43,711 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:47:43,711 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:47:43,711 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T05:47:43,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731736063685Initializing all the Stores at 1731736063685Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736063685Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736063688 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736063688Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736063688Cleaning up temporary data from old regions at 1731736063704 (+16 ms)Region opened successfully at 1731736063712 (+8 ms) 2024-11-16T05:47:43,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:47:43,712 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:47:43,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:47:43,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:47:43,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:47:43,712 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,44501,1731736063421 with port=46863, startcode=1731736063466 2024-11-16T05:47:43,712 DEBUG [RS:0;3456ee6a3164:46863 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:47:43,713 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:47:43,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736063712Disabling compacts and flushes for region at 1731736063712Disabling writes for close at 1731736063712Writing region close event to WAL at 1731736063712Closed at 1731736063712 2024-11-16T05:47:43,714 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:43,714 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 
2024-11-16T05:47:43,714 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54165, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:47:43,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:47:43,715 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44501 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,46863,1731736063466 2024-11-16T05:47:43,715 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44501 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,46863,1731736063466 2024-11-16T05:47:43,716 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:47:43,717 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7 2024-11-16T05:47:43,717 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36821 2024-11-16T05:47:43,717 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:47:43,718 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:47:43,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:47:43,719 DEBUG [RS:0;3456ee6a3164:46863 {}] zookeeper.ZKUtil(111): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,46863,1731736063466 2024-11-16T05:47:43,719 WARN [RS:0;3456ee6a3164:46863 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T05:47:43,719 INFO [RS:0;3456ee6a3164:46863 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:43,720 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466 2024-11-16T05:47:43,720 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,46863,1731736063466] 2024-11-16T05:47:43,724 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:47:43,726 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:47:43,727 INFO [RS:0;3456ee6a3164:46863 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:47:43,727 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,729 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:47:43,730 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:47:43,730 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,730 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,730 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,730 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,730 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:47:43,731 DEBUG [RS:0;3456ee6a3164:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:47:43,733 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,733 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,733 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,734 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,734 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,734 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,46863,1731736063466-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:47:43,759 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:47:43,759 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,46863,1731736063466-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,759 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:43,760 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.Replication(171): 3456ee6a3164,46863,1731736063466 started 2024-11-16T05:47:43,777 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T05:47:43,777 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,46863,1731736063466, RpcServer on 3456ee6a3164/172.17.0.2:46863, sessionid=0x1004712aa1b0001 2024-11-16T05:47:43,777 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:47:43,777 DEBUG [RS:0;3456ee6a3164:46863 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,46863,1731736063466 2024-11-16T05:47:43,777 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,46863,1731736063466' 2024-11-16T05:47:43,777 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:47:43,778 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:47:43,778 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:47:43,778 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:47:43,778 DEBUG [RS:0;3456ee6a3164:46863 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,46863,1731736063466 2024-11-16T05:47:43,778 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,46863,1731736063466' 2024-11-16T05:47:43,778 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:47:43,779 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:47:43,780 DEBUG [RS:0;3456ee6a3164:46863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:47:43,780 INFO [RS:0;3456ee6a3164:46863 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:47:43,780 INFO [RS:0;3456ee6a3164:46863 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T05:47:43,871 WARN [3456ee6a3164:44501 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-16T05:47:43,882 INFO [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C46863%2C1731736063466, suffix=, logDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466, archiveDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs, maxLogs=32 2024-11-16T05:47:43,883 INFO [RS:0;3456ee6a3164:46863 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.1731736063883 2024-11-16T05:47:43,890 INFO [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 2024-11-16T05:47:43,893 DEBUG [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41573:41573),(127.0.0.1/127.0.0.1:39465:39465)] 2024-11-16T05:47:44,122 DEBUG [3456ee6a3164:44501 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:47:44,123 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,46863,1731736063466 2024-11-16T05:47:44,126 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,46863,1731736063466, state=OPENING 2024-11-16T05:47:44,130 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:47:44,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:44,134 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:47:44,135 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:47:44,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:44,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,46863,1731736063466}] 2024-11-16T05:47:44,135 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:44,290 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:47:44,291 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35755, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:47:44,295 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:47:44,296 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:44,298 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C46863%2C1731736063466.meta, suffix=.meta, logDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466, archiveDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs, maxLogs=32 2024-11-16T05:47:44,299 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta 2024-11-16T05:47:44,304 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta 2024-11-16T05:47:44,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:47:44,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T05:47:44,305 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39465:39465),(127.0.0.1/127.0.0.1:41573:41573)] 2024-11-16T05:47:44,306 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T05:47:44,306 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:47:44,306 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:47:44,306 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:47:44,306 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T05:47:44,306 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:47:44,306 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:44,307 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:47:44,307 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:47:44,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:47:44,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:47:44,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:44,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:44,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:47:44,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:47:44,311 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:44,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:44,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:47:44,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:47:44,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:44,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:47:44,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:47:44,335 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:47:44,336 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:44,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T05:47:44,336 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:47:44,337 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740 2024-11-16T05:47:44,338 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740 2024-11-16T05:47:44,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:47:44,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:47:44,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:47:44,341 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:47:44,342 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703622, jitterRate=-0.10529866814613342}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:47:44,342 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:47:44,343 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731736064307Writing region info on filesystem at 1731736064307Initializing all the Stores at 1731736064308 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736064308Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736064308Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736064308Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736064308Cleaning up temporary data from old regions at 1731736064340 (+32 ms)Running coprocessor post-open hooks at 1731736064342 (+2 ms)Region opened successfully at 1731736064343 (+1 ms) 2024-11-16T05:47:44,344 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731736064289 2024-11-16T05:47:44,347 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:47:44,347 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:47:44,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,46863,1731736063466 2024-11-16T05:47:44,349 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,46863,1731736063466, state=OPEN 2024-11-16T05:47:44,351 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:47:44,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:47:44,351 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,46863,1731736063466 2024-11-16T05:47:44,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:44,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:47:44,354 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:47:44,354 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,46863,1731736063466 in 216 msec 2024-11-16T05:47:44,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:47:44,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 639 msec 2024-11-16T05:47:44,358 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:47:44,358 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:47:44,360 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:47:44,360 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,46863,1731736063466, seqNum=-1] 2024-11-16T05:47:44,360 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:47:44,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40107, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:47:44,368 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 718 msec 2024-11-16T05:47:44,368 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731736064368, completionTime=-1 2024-11-16T05:47:44,368 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:47:44,368 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:47:44,370 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:47:44,370 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736124370 2024-11-16T05:47:44,370 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736184370 2024-11-16T05:47:44,370 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-16T05:47:44,370 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,44501,1731736063421-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,371 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,44501,1731736063421-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,371 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,44501,1731736063421-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,371 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:44501, period=300000, unit=MILLISECONDS is enabled. 
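InitMetaProcedure creates the 'default' and 'hbase' namespaces internally as part of pid=1 above. A hedged sketch of the equivalent public Admin calls (the 'test_ns' name is invented; the built-in namespaces already exist and are only listed here):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createNamespace(NamespaceDescriptor.create("test_ns").build()); // user namespace
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName()); // expect at least 'default' and 'hbase'
      }
    }
  }
}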
2024-11-16T05:47:44,371 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,371 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,373 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.865sec 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,44501,1731736063421-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:47:44,375 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,44501,1731736063421-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:47:44,378 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:47:44,378 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:47:44,378 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,44501,1731736063421-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
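The "Chore ScheduledChore name=..., period=..., unit=..." entries are printed by ChoreService when a chore is scheduled. A minimal sketch of a custom chore using the same classes the log names (the 'demo'/'DemoChore' identifiers are invented):

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() { System.out.println("tick"); } // runs every 1000 ms
    });
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}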
2024-11-16T05:47:44,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39ee0233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:47:44,394 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,44501,-1 for getting cluster id 2024-11-16T05:47:44,395 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:47:44,396 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4ce07069-b301-4bb6-8354-bff376268c5b' 2024-11-16T05:47:44,397 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:47:44,397 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4ce07069-b301-4bb6-8354-bff376268c5b" 2024-11-16T05:47:44,397 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfa870, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:47:44,397 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,44501,-1] 2024-11-16T05:47:44,398 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T05:47:44,398 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:47:44,399 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51686, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:47:44,400 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aab0a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:47:44,401 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:47:44,402 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,46863,1731736063466, seqNum=-1] 2024-11-16T05:47:44,402 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:47:44,404 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34004, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:47:44,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,44501,1731736063421 2024-11-16T05:47:44,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:44,409 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:47:44,424 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:47:44,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:44,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:44,424 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:47:44,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:47:44,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:47:44,424 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:47:44,425 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:47:44,425 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41701 2024-11-16T05:47:44,427 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41701 connecting to ZooKeeper ensemble=127.0.0.1:50879 2024-11-16T05:47:44,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:44,430 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:47:44,434 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417010x0, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:47:44,434 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-16T05:47:44,434 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41701-0x1004712aa1b0002 connected 2024-11-16T05:47:44,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-16T05:47:44,435 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:47:44,435 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
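The ClusterIdFetcher / ConnectionRegistry / meta-location exchange above is what ConnectionFactory.createConnection() performs internally before any table RPC. A hedged client-side sketch, reusing the ZooKeeper quorum printed in the log; the row key 'row1' is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:50879"); // quorum from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
      Result r = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println("row1 present=" + (!r.isEmpty()));
    }
  }
}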
2024-11-16T05:47:44,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:47:44,437 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:47:44,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41701 2024-11-16T05:47:44,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41701 2024-11-16T05:47:44,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41701 2024-11-16T05:47:44,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41701 2024-11-16T05:47:44,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41701 2024-11-16T05:47:44,446 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(746): ClusterId : 4ce07069-b301-4bb6-8354-bff376268c5b 2024-11-16T05:47:44,446 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:47:44,448 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:47:44,448 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:47:44,450 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T05:47:44,450 DEBUG [RS:1;3456ee6a3164:41701 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d14e7f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:47:44,468 DEBUG [RS:1;3456ee6a3164:41701 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3456ee6a3164:41701 2024-11-16T05:47:44,468 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:47:44,468 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:47:44,468 DEBUG [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(832): About to register with Master. 
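The ZKUtil/ZKWatcher entries above set watches on znodes such as /hbase/master and /hbase/meta-region-server. A hedged sketch with the plain ZooKeeper client (quorum and paths copied from the log; the /hbase/meta-region-server payload is protobuf-framed, so it is only fetched, not decoded):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent e) ->
        System.out.println("event " + e.getType() + " on " + e.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50879", 30_000, watcher);
    Stat masterStat = zk.exists("/hbase/master", true);                // leaves a watch, like ZKUtil
    byte[] meta = zk.getData("/hbase/meta-region-server", true, null); // watch + raw payload
    System.out.println("master znode=" + (masterStat != null) + ", meta bytes=" + meta.length);
    zk.close();
  }
}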
2024-11-16T05:47:44,469 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,44501,1731736063421 with port=41701, startcode=1731736064423 2024-11-16T05:47:44,470 DEBUG [RS:1;3456ee6a3164:41701 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:47:44,472 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43581, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:47:44,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44501 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,41701,1731736064423 2024-11-16T05:47:44,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44501 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,41701,1731736064423 2024-11-16T05:47:44,474 DEBUG [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7 2024-11-16T05:47:44,474 DEBUG [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36821 2024-11-16T05:47:44,474 DEBUG [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:47:44,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:47:44,476 DEBUG [RS:1;3456ee6a3164:41701 {}] zookeeper.ZKUtil(111): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,41701,1731736064423 2024-11-16T05:47:44,476 WARN [RS:1;3456ee6a3164:41701 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T05:47:44,476 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,41701,1731736064423] 2024-11-16T05:47:44,476 INFO [RS:1;3456ee6a3164:41701 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:47:44,476 DEBUG [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423 2024-11-16T05:47:44,480 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:47:44,482 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:47:44,482 INFO [RS:1;3456ee6a3164:41701 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:47:44,482 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
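The WALFactory (FSHLogProvider) and MemStoreFlusher limits reported above are driven by configuration. A hedged sketch using commonly documented keys; the values are illustrative, not necessarily what this test sets:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                   // selects FSHLogProvider, as in the log
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f); // fraction of heap for memstores
    System.out.println(conf.get("hbase.wal.provider"));
  }
}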
2024-11-16T05:47:44,483 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:47:44,484 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:47:44,484 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,484 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,485 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,485 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,485 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,485 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:47:44,485 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:47:44,485 DEBUG [RS:1;3456ee6a3164:41701 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:47:44,488 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T05:47:44,489 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,489 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,489 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,489 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,489 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,41701,1731736064423-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:47:44,504 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:47:44,504 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,41701,1731736064423-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,504 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,504 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.Replication(171): 3456ee6a3164,41701,1731736064423 started 2024-11-16T05:47:44,517 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:47:44,517 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,41701,1731736064423, RpcServer on 3456ee6a3164/172.17.0.2:41701, sessionid=0x1004712aa1b0002 2024-11-16T05:47:44,517 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:47:44,517 DEBUG [RS:1;3456ee6a3164:41701 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,41701,1731736064423 2024-11-16T05:47:44,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;3456ee6a3164:41701,5,FailOnTimeoutGroup] 2024-11-16T05:47:44,517 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,41701,1731736064423' 2024-11-16T05:47:44,517 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:47:44,518 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-16T05:47:44,518 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T05:47:44,518 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
3456ee6a3164,41701,1731736064423 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,41701,1731736064423' 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:47:44,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,44501,1731736063421 2024-11-16T05:47:44,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@186e431a 2024-11-16T05:47:44,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T05:47:44,519 DEBUG [RS:1;3456ee6a3164:41701 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:47:44,520 INFO [RS:1;3456ee6a3164:41701 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:47:44,520 INFO [RS:1;3456ee6a3164:41701 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T05:47:44,521 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51702, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T05:47:44,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T05:47:44,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
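The two TableDescriptorChecker warnings correspond to a descriptor carrying a deliberately small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), which this kind of test uses to force frequent flushes and rolls. A hedged sketch of building such a descriptor with the public API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyFlushTableSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)     // triggers the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192L) // triggers the MEMSTORE_FLUSHSIZE warning above
        .build();
  }
}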
2024-11-16T05:47:44,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:47:44,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T05:47:44,525 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T05:47:44,525 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:44,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-16T05:47:44,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:47:44,527 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T05:47:44,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741835_1011 (size=393) 2024-11-16T05:47:44,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741835_1011 (size=393) 2024-11-16T05:47:44,535 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d0ad76cb64ef7ce60d3f54e1e7efc517, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7 2024-11-16T05:47:44,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37035 is added to blk_1073741836_1012 (size=76) 2024-11-16T05:47:44,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40457 is added to blk_1073741836_1012 (size=76) 2024-11-16T05:47:44,542 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:44,542 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing d0ad76cb64ef7ce60d3f54e1e7efc517, disabling compactions & flushes 2024-11-16T05:47:44,542 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,542 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,542 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. after waiting 0 ms 2024-11-16T05:47:44,542 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,542 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,542 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for d0ad76cb64ef7ce60d3f54e1e7efc517: Waiting for close lock at 1731736064542Disabling compacts and flushes for region at 1731736064542Disabling writes for close at 1731736064542Writing region close event to WAL at 1731736064542Closed at 1731736064542 2024-11-16T05:47:44,544 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T05:47:44,544 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731736064544"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731736064544"}]},"ts":"1731736064544"} 2024-11-16T05:47:44,547 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
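The MetaTableAccessor Put above writes the info:regioninfo and info:state cells for the new region into hbase:meta. A hedged sketch of reading those rows back with an ordinary client scan (the start row is simply the table name followed by a comma, as in the Put's row key):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("TestLogRolling-testLogRollOnDatanodeDeath,"))
          .setLimit(5);
      try (ResultScanner rs = meta.getScanner(scan)) {
        for (Result r : rs) {
          System.out.println(Bytes.toString(r.getRow())); // region row(s) for the table
        }
      }
    }
  }
}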
2024-11-16T05:47:44,548 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T05:47:44,548 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736064548"}]},"ts":"1731736064548"} 2024-11-16T05:47:44,550 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-16T05:47:44,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d0ad76cb64ef7ce60d3f54e1e7efc517, ASSIGN}] 2024-11-16T05:47:44,552 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d0ad76cb64ef7ce60d3f54e1e7efc517, ASSIGN 2024-11-16T05:47:44,554 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d0ad76cb64ef7ce60d3f54e1e7efc517, ASSIGN; state=OFFLINE, location=3456ee6a3164,46863,1731736063466; forceNewPlan=false, retain=false 2024-11-16T05:47:44,625 INFO [RS:1;3456ee6a3164:41701 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C41701%2C1731736064423, suffix=, logDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423, archiveDir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs, maxLogs=32 2024-11-16T05:47:44,628 INFO [RS:1;3456ee6a3164:41701 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C41701%2C1731736064423.1731736064627 2024-11-16T05:47:44,635 INFO [RS:1;3456ee6a3164:41701 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 2024-11-16T05:47:44,637 DEBUG [RS:1;3456ee6a3164:41701 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39465:39465),(127.0.0.1/127.0.0.1:41573:41573)] 2024-11-16T05:47:44,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:44,686 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:44,705 INFO [3456ee6a3164:44501 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
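The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entry reflects WAL sizing settings; the roll size is typically the block size times a roll multiplier. A hedged sketch with the commonly documented keys (values illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at blocksize * multiplier
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // WALs kept before forced flushes
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0L)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("roll at ~" + rollSize + " bytes"); // ~128 MB, matching the log
  }
}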
2024-11-16T05:47:44,705 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d0ad76cb64ef7ce60d3f54e1e7efc517, regionState=OPENING, regionLocation=3456ee6a3164,46863,1731736063466 2024-11-16T05:47:44,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d0ad76cb64ef7ce60d3f54e1e7efc517, ASSIGN because future has completed 2024-11-16T05:47:44,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0ad76cb64ef7ce60d3f54e1e7efc517, server=3456ee6a3164,46863,1731736063466}] 2024-11-16T05:47:44,873 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,874 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d0ad76cb64ef7ce60d3f54e1e7efc517, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:47:44,875 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,876 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:47:44,876 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,876 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,878 INFO [StoreOpener-d0ad76cb64ef7ce60d3f54e1e7efc517-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,880 INFO [StoreOpener-d0ad76cb64ef7ce60d3f54e1e7efc517-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0ad76cb64ef7ce60d3f54e1e7efc517 columnFamilyName info 2024-11-16T05:47:44,880 DEBUG [StoreOpener-d0ad76cb64ef7ce60d3f54e1e7efc517-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:47:44,881 INFO [StoreOpener-d0ad76cb64ef7ce60d3f54e1e7efc517-1 {}] regionserver.HStore(327): Store=d0ad76cb64ef7ce60d3f54e1e7efc517/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:47:44,881 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,882 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,882 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,883 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,883 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,885 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,888 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:47:44,888 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d0ad76cb64ef7ce60d3f54e1e7efc517; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728874, jitterRate=-0.07318927347660065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:47:44,888 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:47:44,889 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d0ad76cb64ef7ce60d3f54e1e7efc517: Running coprocessor pre-open hook at 1731736064876Writing region info on filesystem at 1731736064876Initializing all the Stores at 1731736064878 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736064878Cleaning up temporary data from old regions at 1731736064883 (+5 ms)Running coprocessor post-open hooks at 1731736064888 (+5 ms)Region opened successfully at 1731736064889 (+1 ms) 2024-11-16T05:47:44,891 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517., pid=6, masterSystemTime=1731736064862 2024-11-16T05:47:44,894 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,894 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:44,895 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d0ad76cb64ef7ce60d3f54e1e7efc517, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,46863,1731736063466 2024-11-16T05:47:44,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0ad76cb64ef7ce60d3f54e1e7efc517, server=3456ee6a3164,46863,1731736063466 because future has completed 2024-11-16T05:47:44,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T05:47:44,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d0ad76cb64ef7ce60d3f54e1e7efc517, server=3456ee6a3164,46863,1731736063466 in 193 msec 2024-11-16T05:47:44,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T05:47:44,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d0ad76cb64ef7ce60d3f54e1e7efc517, ASSIGN in 353 msec 2024-11-16T05:47:44,910 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T05:47:44,910 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736064910"}]},"ts":"1731736064910"} 2024-11-16T05:47:44,913 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-16T05:47:44,915 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T05:47:44,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 393 msec 2024-11-16T05:47:45,202 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:47:45,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:45,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:45,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:45,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:49,724 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-16T05:47:50,379 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:47:50,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:50,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:50,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:50,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:47:54,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T05:47:54,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T05:47:54,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T05:47:54,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-16T05:47:54,306 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:47:54,306 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T05:47:54,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:47:54,570 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-16T05:47:54,571 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-16T05:47:54,577 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T05:47:54,577 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:47:54,592 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:54,595 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:54,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:54,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:54,595 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:47:54,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24345a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:54,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@187c1982{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:54,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ab2015c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-38765-hadoop-hdfs-3_4_1-tests_jar-_-any-14096581730706238326/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:54,695 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40a289c6{HTTP/1.1, (http/1.1)}{localhost:38765} 2024-11-16T05:47:54,695 INFO [Time-limited test {}] server.Server(415): Started @116393ms 2024-11-16T05:47:54,696 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:47:54,726 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:54,730 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:54,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:54,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:54,731 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:47:54,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44072a10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:54,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c6ba029{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:54,754 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:54,754 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:54,769 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:47:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63560344601326e3 with lease ID 0xbb7795547e792d79: Processing first storage report for DS-b6fc0778-deca-4ea8-8017-355e28735fb0 from datanode DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:54,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63560344601326e3 with lease ID 0xbb7795547e792d79: from storage DS-b6fc0778-deca-4ea8-8017-355e28735fb0 node DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63560344601326e3 with lease ID 0xbb7795547e792d79: Processing first storage report for DS-b667a227-b9ab-45e9-9e00-ac239b8658cd from datanode DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63560344601326e3 with lease ID 0xbb7795547e792d79: from storage DS-b667a227-b9ab-45e9-9e00-ac239b8658cd node DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:54,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@147c0b90{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-35367-hadoop-hdfs-3_4_1-tests_jar-_-any-830028230692331803/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:54,830 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d14a9f1{HTTP/1.1, (http/1.1)}{localhost:35367} 2024-11-16T05:47:54,830 INFO [Time-limited test {}] server.Server(415): Started @116528ms 2024-11-16T05:47:54,831 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:47:54,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:47:54,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:47:54,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:47:54,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:47:54,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:47:54,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@329611e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:47:54,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c16d06a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:47:54,891 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data8/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:54,891 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data7/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:54,916 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:47:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4819026cf5e12145 with lease ID 0xbb7795547e792d7a: Processing first storage report for DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9 from datanode DatanodeRegistration(127.0.0.1:38437, datanodeUuid=690ad6e9-1b13-4722-aa25-bbb44019a79d, infoPort=37373, infoSecurePort=0, ipcPort=37325, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4819026cf5e12145 with lease ID 0xbb7795547e792d7a: from storage DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9 node DatanodeRegistration(127.0.0.1:38437, datanodeUuid=690ad6e9-1b13-4722-aa25-bbb44019a79d, infoPort=37373, infoSecurePort=0, ipcPort=37325, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4819026cf5e12145 with lease ID 0xbb7795547e792d7a: Processing first storage report for DS-bdf3a4d5-7773-41d5-a75a-7cfef06f315a from datanode DatanodeRegistration(127.0.0.1:38437, datanodeUuid=690ad6e9-1b13-4722-aa25-bbb44019a79d, infoPort=37373, infoSecurePort=0, ipcPort=37325, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4819026cf5e12145 with lease ID 0xbb7795547e792d7a: from storage DS-bdf3a4d5-7773-41d5-a75a-7cfef06f315a node DatanodeRegistration(127.0.0.1:38437, datanodeUuid=690ad6e9-1b13-4722-aa25-bbb44019a79d, infoPort=37373, infoSecurePort=0, ipcPort=37325, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:54,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2902c156{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-37399-hadoop-hdfs-3_4_1-tests_jar-_-any-10610280074411271097/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:54,965 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@77334da9{HTTP/1.1, (http/1.1)}{localhost:37399} 2024-11-16T05:47:54,965 INFO [Time-limited test {}] server.Server(415): Started @116663ms 2024-11-16T05:47:54,966 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-16T05:47:55,023 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data9/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:55,023 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data10/current/BP-721148889-172.17.0.2-1731736062832/current, will proceed with Du for space computation calculation, 2024-11-16T05:47:55,040 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:47:55,043 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xffa809c025939bec with lease ID 0xbb7795547e792d7b: Processing first storage report for DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b from datanode DatanodeRegistration(127.0.0.1:32791, datanodeUuid=25a67b48-ad0f-49d3-8799-b7e747f1b53b, infoPort=35433, infoSecurePort=0, ipcPort=39847, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:55,043 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xffa809c025939bec with lease ID 0xbb7795547e792d7b: from storage DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b node DatanodeRegistration(127.0.0.1:32791, datanodeUuid=25a67b48-ad0f-49d3-8799-b7e747f1b53b, infoPort=35433, infoSecurePort=0, ipcPort=39847, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:55,043 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xffa809c025939bec with lease ID 0xbb7795547e792d7b: Processing first storage report for DS-be3d0e51-ad0a-4256-bc56-2eec7682cf8d from datanode DatanodeRegistration(127.0.0.1:32791, datanodeUuid=25a67b48-ad0f-49d3-8799-b7e747f1b53b, infoPort=35433, infoSecurePort=0, ipcPort=39847, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832) 2024-11-16T05:47:55,043 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xffa809c025939bec with lease ID 0xbb7795547e792d7b: from storage DS-be3d0e51-ad0a-4256-bc56-2eec7682cf8d node DatanodeRegistration(127.0.0.1:32791, datanodeUuid=25a67b48-ad0f-49d3-8799-b7e747f1b53b, infoPort=35433, infoSecurePort=0, ipcPort=39847, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:47:55,083 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,083 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,083 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,083 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,084 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta block BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:47:55,084 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 block BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK], DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 
2024-11-16T05:47:55,084 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 block BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:47:55,084 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 block BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:47:55,084 WARN [PacketResponder: BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37035] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:47:55,085 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602501559_22 at /127.0.0.1:38346 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37035:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38346 dst: /127.0.0.1:37035 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,085 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1124001647_22 at /127.0.0.1:38436 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37035:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38436 dst: /127.0.0.1:37035 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,085 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:38406 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37035:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38406 dst: /127.0.0.1:37035 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,086 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:54186 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54186 dst: /127.0.0.1:40457 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,086 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:38388 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37035:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38388 dst: /127.0.0.1:37035 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,086 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:54192 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54192 dst: /127.0.0.1:40457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1124001647_22 at /127.0.0.1:54216 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54216 dst: /127.0.0.1:40457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602501559_22 at /127.0.0.1:54170 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54170 dst: /127.0.0.1:40457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:47:55,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f1aa8ab{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:55,089 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@779704d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:55,089 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:55,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c5cbc59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:55,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54c0bf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:55,091 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:47:55,091 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T05:47:55,091 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:47:55,091 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721148889-172.17.0.2-1731736062832 (Datanode Uuid aadbfb3b-707a-4c20-b5d9-9812720a1509) service to localhost/127.0.0.1:36821 2024-11-16T05:47:55,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data3/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:55,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data4/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:55,093 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:47:55,093 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 block BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,093 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta block BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:47:55,093 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@7792f625 {}] datanode.DataXceiver(331): 127.0.0.1:40457:DataXceiver error processing unknown operation src: /127.0.0.1:51950 dst: /127.0.0.1:40457 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,094 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 block BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1124001647_22 at /127.0.0.1:51948 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51948 dst: /127.0.0.1:40457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:47:55,097 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a34f554{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:47:55,101 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@225c4391{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:47:55,101 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:47:55,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a95d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:47:55,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c43bbf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED} 2024-11-16T05:47:55,102 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:47:55,102 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:47:55,102 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721148889-172.17.0.2-1731736062832 (Datanode Uuid ac5ff1c1-40f4-4466-8380-85ac8b35eb01) service to localhost/127.0.0.1:36821 2024-11-16T05:47:55,102 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:47:55,103 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data1/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:55,103 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data2/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:47:55,103 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:47:55,107 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517., hostname=3456ee6a3164,46863,1731736063466, seqNum=2] 2024-11-16T05:47:55,108 ERROR [FSHLog-0-hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7-prefix:3456ee6a3164,46863,1731736063466 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:47:55,109 WARN [FSHLog-0-hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7-prefix:3456ee6a3164,46863,1731736063466 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:55,109 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:55,109 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C46863%2C1731736063466:(num 1731736063883) roll requested
2024-11-16T05:47:55,109 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.1731736075109
2024-11-16T05:47:55,112 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:55,112 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad.
2024-11-16T05:47:55,112 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741838_1018
2024-11-16T05:47:55,115 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]
2024-11-16T05:47:55,121 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:55,121 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:55,121 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:55,122 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:55,122 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:55,122 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736075109
2024-11-16T05:47:55,122 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:55,122 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:55,123 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-16T05:47:55,123 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-16T05:47:55,124 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883
2024-11-16T05:47:55,125 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35433:35433),(127.0.0.1/127.0.0.1:37373:37373)]
2024-11-16T05:47:55,125 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time
2024-11-16T05:47:55,126 WARN [IPC Server handler 3 on default port 36821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009
2024-11-16T05:47:55,129 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 after 4ms
2024-11-16T05:47:55,556 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:56,488 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:57,125 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:57,127 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736075109
2024-11-16T05:47:57,128 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741839_1019
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:57,128 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736075109 block BP-721148889-172.17.0.2-1731736062832:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad.
2024-11-16T05:47:57,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:48582 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:32791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48582 dst: /127.0.0.1:32791
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T05:47:57,130 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:51428 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:38437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51428 dst: /127.0.0.1:38437
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T05:47:57,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2902c156{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T05:47:57,132 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@77334da9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T05:47:57,132 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T05:47:57,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c16d06a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T05:47:57,133 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@329611e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED}
2024-11-16T05:47:57,135 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T05:47:57,135 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T05:47:57,135 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T05:47:57,135 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721148889-172.17.0.2-1731736062832 (Datanode Uuid 25a67b48-ad0f-49d3-8799-b7e747f1b53b) service to localhost/127.0.0.1:36821
2024-11-16T05:47:57,136 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data9/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T05:47:57,136 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data10/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T05:47:57,136 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T05:47:57,557 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:58,489 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:59,126 WARN [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]
2024-11-16T05:47:59,127 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:59,127 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C46863%2C1731736063466:(num 1731736075109) roll requested
2024-11-16T05:47:59,128 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.1731736079127
2024-11-16T05:47:59,131 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 after 4007ms
2024-11-16T05:47:59,132 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:47:59,133 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad.
2024-11-16T05:47:59,133 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741840_1022
2024-11-16T05:47:59,134 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]
2024-11-16T05:47:59,141 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-16T05:47:59,143 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:59,143 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:59,143 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:59,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:59,143 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:47:59,144 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736075109 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736079127
2024-11-16T05:47:59,145 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37373:37373),(127.0.0.1/127.0.0.1:33009:33009)]
2024-11-16T05:47:59,145 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time
2024-11-16T05:47:59,145 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736075109 is not closed yet, will try archiving it next time
2024-11-16T05:47:59,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38437 is added to blk_1073741839_1021 (size=2431)
2024-11-16T05:47:59,548 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time
2024-11-16T05:47:59,558 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:00,489 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,145 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,147 WARN [ResponseProcessor for block BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1023
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,148 WARN [DataStreamer for file /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736079127 block BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad.
2024-11-16T05:48:01,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:34376 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:38437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34376 dst: /127.0.0.1:38437
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T05:48:01,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57004 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57004 dst: /127.0.0.1:33595
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T05:48:01,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@147c0b90{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T05:48:01,152 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d14a9f1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T05:48:01,152 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T05:48:01,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c6ba029{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T05:48:01,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44072a10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED}
2024-11-16T05:48:01,154 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T05:48:01,154 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T05:48:01,154 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721148889-172.17.0.2-1731736062832 (Datanode Uuid 690ad6e9-1b13-4722-aa25-bbb44019a79d) service to localhost/127.0.0.1:36821
2024-11-16T05:48:01,154 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T05:48:01,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data7/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T05:48:01,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data8/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T05:48:01,155 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T05:48:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46863 {}] regionserver.HRegion(8855): Flush requested on d0ad76cb64ef7ce60d3f54e1e7efc517
2024-11-16T05:48:01,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-16T05:48:01,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/b8d9f647a7614b8197d4fb956968ceea is 1080, key is row0002/info:/1731736077137/Put/seqid=0
2024-11-16T05:48:01,186 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,186 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad.
2024-11-16T05:48:01,186 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741842_1025
2024-11-16T05:48:01,187 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]
2024-11-16T05:48:01,189 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40457
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,189 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad.
2024-11-16T05:48:01,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57018 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741843_1026 to mirror 127.0.0.1:40457
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T05:48:01,189 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741843_1026
2024-11-16T05:48:01,190 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57018 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-16T05:48:01,190 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57018 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57018 dst: /127.0.0.1:33595
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T05:48:01,190 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]
2024-11-16T05:48:01,191 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,191 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad.
2024-11-16T05:48:01,191 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741844_1027
2024-11-16T05:48:01,192 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]
2024-11-16T05:48:01,193 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,193 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad.
2024-11-16T05:48:01,193 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741845_1028
2024-11-16T05:48:01,194 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]
2024-11-16T05:48:01,195 WARN [IPC Server handler 2 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-16T05:48:01,195 WARN [IPC Server handler 2 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-16T05:48:01,196 WARN [IPC Server handler 2 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-16T05:48:01,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741846_1029 (size=10347)
2024-11-16T05:48:01,558 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/b8d9f647a7614b8197d4fb956968ceea
2024-11-16T05:48:01,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/b8d9f647a7614b8197d4fb956968ceea as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/b8d9f647a7614b8197d4fb956968ceea
2024-11-16T05:48:01,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/b8d9f647a7614b8197d4fb956968ceea, entries=5, sequenceid=11, filesize=10.1 K
2024-11-16T05:48:01,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 458ms, sequenceid=11, compaction requested=false
2024-11-16T05:48:01,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d0ad76cb64ef7ce60d3f54e1e7efc517:
2024-11-16T05:48:01,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46863 {}] regionserver.HRegion(8855): Flush requested on d0ad76cb64ef7ce60d3f54e1e7efc517
2024-11-16T05:48:01,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-11-16T05:48:01,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/7086daede5af40a7bb270498dfe05f04 is 1080, key is row0007/info:/1731736081165/Put/seqid=0
2024-11-16T05:48:01,805 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,805 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad.
2024-11-16T05:48:01,805 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741847_1030
2024-11-16T05:48:01,806 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]
2024-11-16T05:48:01,807 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T05:48:01,807 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad.
2024-11-16T05:48:01,807 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741848_1031
2024-11-16T05:48:01,808 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]
2024-11-16T05:48:01,809 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:01,809 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:01,809 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741849_1032 2024-11-16T05:48:01,810 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:01,812 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:01,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57042 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741850_1033 to mirror 127.0.0.1:32791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:01,812 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:01,812 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741850_1033 2024-11-16T05:48:01,812 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57042 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:01,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57042 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57042 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:01,813 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:01,813 WARN [IPC Server handler 4 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:01,813 WARN [IPC Server handler 4 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:01,813 WARN [IPC Server handler 4 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:01,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741851_1034 (size=12506) 2024-11-16T05:48:02,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/7086daede5af40a7bb270498dfe05f04 2024-11-16T05:48:02,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/7086daede5af40a7bb270498dfe05f04 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04 2024-11-16T05:48:02,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04, entries=7, sequenceid=24, filesize=12.2 K 2024-11-16T05:48:02,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 440ms, sequenceid=24, compaction requested=false 2024-11-16T05:48:02,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:02,234 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-16T05:48:02,235 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:02,235 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04 because midkey is the same as first or last row 2024-11-16T05:48:02,490 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,146 WARN [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]] 2024-11-16T05:48:03,146 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,147 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C46863%2C1731736063466:(num 1731736079127) roll requested 2024-11-16T05:48:03,149 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.1731736083147 2024-11-16T05:48:03,154 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,154 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57072 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741852_1035 to mirror 127.0.0.1:32791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,154 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:03,154 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741852_1035 2024-11-16T05:48:03,154 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57072 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T05:48:03,154 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57072 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57072 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,155 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:03,156 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,156 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:03,156 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741853_1036 2024-11-16T05:48:03,156 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:03,158 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,158 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:03,158 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741854_1037 2024-11-16T05:48:03,158 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:03,160 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40457 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57080 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741855_1038 to mirror 127.0.0.1:40457 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,160 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 2024-11-16T05:48:03,160 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57080 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T05:48:03,160 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741855_1038 2024-11-16T05:48:03,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57080 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57080 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:03,161 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:03,162 WARN [IPC Server handler 0 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:03,162 WARN [IPC Server handler 0 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:03,162 WARN [IPC Server handler 0 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:03,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:03,164 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:03,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:03,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:03,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:03,165 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736079127 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736083147 2024-11-16T05:48:03,166 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33009:33009)] 2024-11-16T05:48:03,166 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time 2024-11-16T05:48:03,166 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736079127 is not closed yet, will try archiving it next time 2024-11-16T05:48:03,166 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736075109 to 
hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs/3456ee6a3164%2C46863%2C1731736063466.1731736075109 2024-11-16T05:48:03,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741841_1024 (size=25992) 2024-11-16T05:48:03,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46863 {}] regionserver.HRegion(8855): Flush requested on d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:48:03,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T05:48:03,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/4b11b97ab2a040ed91f3dab14a4d0769 is 1079, key is tmprow/info:/1731736083229/Put/seqid=0 2024-11-16T05:48:03,239 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,240 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:03,240 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741857_1040 2024-11-16T05:48:03,240 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:03,243 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40457 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57096 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741858_1041 to mirror 127.0.0.1:40457 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,243 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 2024-11-16T05:48:03,243 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57096 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:03,243 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741858_1041 2024-11-16T05:48:03,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57096 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57096 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,244 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:03,245 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,246 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK], DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:03,246 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741859_1042 2024-11-16T05:48:03,247 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:03,249 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37035 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57098 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741860_1043 to mirror 127.0.0.1:37035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,249 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:03,250 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57098 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:03,250 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741860_1043 2024-11-16T05:48:03,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57098 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57098 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,250 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:03,251 WARN [IPC Server handler 3 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:03,251 WARN [IPC Server handler 3 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:03,251 WARN [IPC Server handler 3 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:03,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741861_1044 (size=6027) 2024-11-16T05:48:03,559 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:48:03,569 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time 2024-11-16T05:48:03,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/4b11b97ab2a040ed91f3dab14a4d0769 2024-11-16T05:48:03,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/4b11b97ab2a040ed91f3dab14a4d0769 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/4b11b97ab2a040ed91f3dab14a4d0769 2024-11-16T05:48:03,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/4b11b97ab2a040ed91f3dab14a4d0769, entries=1, sequenceid=34, filesize=5.9 K 2024-11-16T05:48:03,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 450ms, sequenceid=34, compaction requested=true 2024-11-16T05:48:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-16T05:48:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04 because midkey is the same as first or last row 2024-11-16T05:48:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d0ad76cb64ef7ce60d3f54e1e7efc517:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:48:03,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:48:03,682 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:48:03,683 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:48:03,683 DEBUG 
[RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HStore(1541): d0ad76cb64ef7ce60d3f54e1e7efc517/info is initiating minor compaction (all files) 2024-11-16T05:48:03,683 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d0ad76cb64ef7ce60d3f54e1e7efc517/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:48:03,684 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/b8d9f647a7614b8197d4fb956968ceea, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/4b11b97ab2a040ed91f3dab14a4d0769] into tmpdir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp, totalSize=28.2 K 2024-11-16T05:48:03,684 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.Compactor(225): Compacting b8d9f647a7614b8197d4fb956968ceea, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731736077137 2024-11-16T05:48:03,685 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7086daede5af40a7bb270498dfe05f04, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731736081165 2024-11-16T05:48:03,685 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4b11b97ab2a040ed91f3dab14a4d0769, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731736083229 2024-11-16T05:48:03,701 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d0ad76cb64ef7ce60d3f54e1e7efc517#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:48:03,702 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/c51c492df92b4822bad124bc007dcad7 is 1080, key is row0002/info:/1731736077137/Put/seqid=0 2024-11-16T05:48:03,704 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,704 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:03,704 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741862_1045 2024-11-16T05:48:03,704 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:03,706 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,706 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 2024-11-16T05:48:03,706 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741863_1046 2024-11-16T05:48:03,706 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:03,707 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,708 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:03,708 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741864_1047 2024-11-16T05:48:03,708 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:03,710 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:03,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57126 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741865_1048 to mirror 127.0.0.1:32791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:03,710 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:03,710 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741865_1048 2024-11-16T05:48:03,710 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57126 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:03,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57126 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57126 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
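The repeated "Abandoning blk_... / Excluding datanode ..." records above show the HDFS client giving up on a freshly allocated block and asking the namenode for another one that avoids the datanodes whose connections were refused. The following is a minimal, self-contained Java sketch of that exclude-and-retry pattern only; every name in it (PipelineAllocatorSketch, NameNodeStub, Dialer) is invented for illustration and this is not the actual DataStreamer code.

    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    // Illustrative sketch of the "abandon block, exclude bad datanode, retry"
    // loop suggested by the DataStreamer WARN lines above. All names are
    // hypothetical; this is not the Hadoop client implementation.
    public class PipelineAllocatorSketch {

        /** Hypothetical stand-in for a datanode address such as 127.0.0.1:37035. */
        record DataNode(String address) {}

        interface NameNodeStub {
            /** Hand out a candidate pipeline that avoids the excluded nodes. */
            List<DataNode> allocateBlock(Set<DataNode> excluded);
        }

        interface Dialer {
            /** True if a block output stream could be opened to the first node. */
            boolean connect(DataNode dn);
        }

        static List<DataNode> setUpPipeline(NameNodeStub nn, Dialer dialer, int maxAttempts) {
            Set<DataNode> excluded = new LinkedHashSet<>();
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                List<DataNode> pipeline = nn.allocateBlock(excluded);
                if (pipeline.isEmpty()) {
                    break; // nothing left to try; the write stays under-replicated
                }
                DataNode first = pipeline.get(0);
                if (dialer.connect(first)) {
                    return pipeline; // pipeline established
                }
                // Mirrors "Abandoning blk_... / Excluding datanode ..." in the log:
                // drop the block and never offer this datanode again for this write.
                excluded.add(first);
            }
            return new ArrayList<>(); // all candidates failed
        }

        public static void main(String[] args) {
            List<DataNode> live = List.of(new DataNode("127.0.0.1:33595"));
            List<DataNode> dead = List.of(
                new DataNode("127.0.0.1:37035"),
                new DataNode("127.0.0.1:40457"),
                new DataNode("127.0.0.1:38437"));

            NameNodeStub nn = excludedSet -> {
                List<DataNode> all = new ArrayList<>(dead);
                all.addAll(live);
                all.removeAll(excludedSet);
                return all;
            };
            Dialer dialer = dn -> live.contains(dn); // only the live node accepts connections

            System.out.println("pipeline = " + setUpPipeline(nn, dialer, 5));
        }
    }

In the log the same loop plays out for blocks blk_1073741862 through blk_1073741865: three dead datanodes get excluded one after another until only 127.0.0.1:33595 remains.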
2024-11-16T05:48:03,711 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:03,711 WARN [IPC Server handler 4 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:03,711 WARN [IPC Server handler 4 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:03,711 WARN [IPC Server handler 4 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:03,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741866_1049 (size=17994) 2024-11-16T05:48:04,130 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/c51c492df92b4822bad124bc007dcad7 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 2024-11-16T05:48:04,138 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d0ad76cb64ef7ce60d3f54e1e7efc517/info of d0ad76cb64ef7ce60d3f54e1e7efc517 into c51c492df92b4822bad124bc007dcad7(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
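The compaction records above describe three store files of 10.1 K, 12.2 K and 5.9 K (about 28.2 K of input) being rewritten as a single 17.6 K file. Conceptually a minor compaction is a k-way merge of already-sorted files into one new sorted file, dropping superseded versions of the same key along the way. Below is a small hedged Java sketch of that merge idea; the key/value model is deliberately simplified (no deletes, no TTL, single version retained) and none of the names correspond to real HBase classes.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Toy k-way merge illustrating what a minor compaction does conceptually:
    // several sorted store files go in, one sorted file comes out, and older
    // versions of the same row are dropped. Purely illustrative, not HBase code.
    public class CompactionSketch {

        record Cell(String row, long seqId, String value) {}

        /** Merge sorted inputs; keep only the highest seqId per row. */
        static List<Cell> compact(List<List<Cell>> storeFiles) {
            record Cursor(List<Cell> file, int idx) {
                Cell head() { return file.get(idx); }
            }
            Comparator<Cursor> byRowThenNewest =
                Comparator.comparing((Cursor c) -> c.head().row())
                          .thenComparing(Comparator.comparingLong((Cursor c) -> -c.head().seqId()));
            PriorityQueue<Cursor> heap = new PriorityQueue<>(byRowThenNewest);
            for (List<Cell> f : storeFiles) {
                if (!f.isEmpty()) heap.add(new Cursor(f, 0));
            }

            List<Cell> out = new ArrayList<>();
            String lastRow = null;
            while (!heap.isEmpty()) {
                Cursor c = heap.poll();
                Cell cell = c.head();
                if (!cell.row().equals(lastRow)) {   // first (newest) version of a row wins
                    out.add(cell);
                    lastRow = cell.row();
                }
                if (c.idx() + 1 < c.file().size()) {
                    heap.add(new Cursor(c.file(), c.idx() + 1));
                }
            }
            return out;
        }

        public static void main(String[] args) {
            List<Cell> f1 = List.of(new Cell("row0002", 11, "a"), new Cell("row0005", 11, "b"));
            List<Cell> f2 = List.of(new Cell("row0002", 24, "a2"), new Cell("row0009", 24, "c"));
            List<Cell> f3 = List.of(new Cell("row0010", 34, "d"));
            compact(List.of(f1, f2, f3)).forEach(System.out::println);
        }
    }

The size drop in the log (28.2 K in, 17.6 K out) is consistent with this kind of deduplication plus block rewriting, though the exact savings depend on overlap between the input files.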
2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:04,138 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517., storeName=d0ad76cb64ef7ce60d3f54e1e7efc517/info, priority=13, startTime=1731736083681; duration=0sec 2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 because midkey is the same as first or last row 2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:04,138 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 because midkey is the same as first or last row 2024-11-16T05:48:04,139 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T05:48:04,139 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:04,139 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 because midkey is the same as first or last row 2024-11-16T05:48:04,139 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:48:04,139 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d0ad76cb64ef7ce60d3f54e1e7efc517:info 2024-11-16T05:48:04,490 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:04,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46863 {}] regionserver.HRegion(8855): Flush requested on d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:48:04,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T05:48:04,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/af217228052c4f3ba40e1f10b3daf66f is 1079, key is tmprow/info:/1731736084663/Put/seqid=0 2024-11-16T05:48:04,677 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:04,677 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 2024-11-16T05:48:04,677 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741867_1050 2024-11-16T05:48:04,678 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:04,679 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:04,679 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:04,679 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741868_1051 2024-11-16T05:48:04,680 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:04,682 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:04,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57152 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741869_1052 to mirror 127.0.0.1:32791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:04,682 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:04,682 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741869_1052 2024-11-16T05:48:04,683 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57152 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:04,683 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57152 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57152 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:04,683 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:04,685 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:04,685 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:04,685 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741870_1053 2024-11-16T05:48:04,685 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:04,686 WARN [IPC Server handler 1 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:04,686 WARN [IPC Server handler 1 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:04,686 WARN [IPC Server handler 1 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:04,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741871_1054 (size=6027) 2024-11-16T05:48:04,781 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6664ae27[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741846_1029 to 127.0.0.1:37035 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:04,781 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3ddb7113[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741851_1034 to 127.0.0.1:32791 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:05,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/af217228052c4f3ba40e1f10b3daf66f 2024-11-16T05:48:05,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/af217228052c4f3ba40e1f10b3daf66f as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/af217228052c4f3ba40e1f10b3daf66f 2024-11-16T05:48:05,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/af217228052c4f3ba40e1f10b3daf66f, entries=1, sequenceid=45, filesize=5.9 K 2024-11-16T05:48:05,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 445ms, sequenceid=45, compaction requested=false 2024-11-16T05:48:05,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:05,110 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-16T05:48:05,110 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:05,110 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 because midkey is the same as first or last row 2024-11-16T05:48:05,166 WARN [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]] 2024-11-16T05:48:05,166 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
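The DEBUG lines above show the split-policy check that runs after each flush or compaction: the summed store size (here 23.5 K) exceeds the policy's threshold (16.0 K), yet the region still cannot be split because the candidate midkey equals the first or last row of the dominant store file. The Java sketch below captures only the shape of that decision; the class and field names are invented, the threshold is taken as a plain parameter rather than computed by a policy, and byte[] row keys are replaced with strings.

    import java.util.List;
    import java.util.Optional;

    // Illustrative shape of a "should this region split?" check as suggested by
    // the ConstantSizeRegionSplitPolicy / StoreUtils DEBUG lines above. Names,
    // thresholds and the string key model are simplifications, not HBase code.
    public class SplitCheckSketch {

        record StoreFile(String firstRow, String midRow, String lastRow, long sizeBytes) {}

        static Optional<String> findSplitPoint(List<StoreFile> files, long sizeToCheckBytes) {
            long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
            if (sumSize <= sizeToCheckBytes) {
                return Optional.empty();             // region not big enough yet
            }
            // The largest file supplies the candidate midkey.
            StoreFile largest = files.stream()
                .max(Comparator := null) == null ? null : null; // placeholder removed below
            return Optional.empty();
        }
    }

    // (Corrected version of the method body, kept separate for readability.)
    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    public class SplitCheckSketch2 {

        record StoreFile(String firstRow, String midRow, String lastRow, long sizeBytes) {}

        static Optional<String> findSplitPoint(List<StoreFile> files, long sizeToCheckBytes) {
            long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
            if (sumSize <= sizeToCheckBytes) {
                return Optional.empty();             // region not big enough yet
            }
            StoreFile largest = files.stream()
                .max(Comparator.comparingLong(StoreFile::sizeBytes))
                .orElseThrow();
            String midkey = largest.midRow();
            // "cannot split ... because midkey is the same as first or last row":
            // splitting there would leave one daughter region empty.
            if (midkey.equals(largest.firstRow()) || midkey.equals(largest.lastRow())) {
                return Optional.empty();
            }
            return Optional.of(midkey);
        }

        public static void main(String[] args) {
            // Roughly the situation in the log: ~23.5 K of store data against a 16 K check,
            // but the dominant file's midkey coincides with its boundary rows.
            List<StoreFile> files = List.of(
                new StoreFile("row0002", "row0002", "row0002", 17_994),
                new StoreFile("tmprow", "tmprow", "tmprow", 6_027));
            System.out.println("split point = " + findSplitPoint(files, 16 * 1024));
        }
    }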
2024-11-16T05:48:05,167 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C46863%2C1731736063466:(num 1731736083147) roll requested 2024-11-16T05:48:05,167 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.1731736085167 2024-11-16T05:48:05,172 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:05,172 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:05,172 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741872_1055 2024-11-16T05:48:05,173 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:05,178 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38437 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
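The logRoller entries above show the trigger for the roll: FSHLog observes that the WAL's current pipeline has only 1 replica where at least 2 are expected, requests that the WAL be closed, and a new WAL file (…1731736085167) is opened on whatever datanodes are still reachable. The sketch below is only a schematic of that "check replica count, roll if too low" step; the WalWriter and WalFactory interfaces are invented for illustration and do not exist in HBase.

    // Schematic of a log-roller that rolls the write-ahead log when the HDFS
    // pipeline behind it has fewer replicas than required, as the FSHLog WARN
    // above describes. Interfaces and names are hypothetical.
    public class WalRollOnLowReplicationSketch {

        interface WalWriter {
            int currentPipelineSize();       // replicas currently acknowledging writes
            void close();
            String path();
        }

        interface WalFactory {
            WalWriter createNewWal();        // new file, freshly allocated pipeline
        }

        static WalWriter maybeRoll(WalWriter current, WalFactory factory, int minReplicas) {
            int replicas = current.currentPipelineSize();
            if (replicas >= minReplicas) {
                return current;              // pipeline healthy, keep writing
            }
            // Mirrors "Found 1 replicas but expecting no less than 2 replicas.
            // Requesting close of WAL." followed by "Rolled WAL ... new WAL ...".
            System.out.printf("Found %d replicas but expecting no less than %d; rolling %s%n",
                replicas, minReplicas, current.path());
            current.close();
            return factory.createNewWal();
        }

        public static void main(String[] args) {
            WalWriter degraded = new WalWriter() {
                public int currentPipelineSize() { return 1; }
                public void close() { System.out.println("closing old WAL"); }
                public String path() { return "/WALs/.../old-wal"; }
            };
            WalFactory factory = () -> new WalWriter() {
                public int currentPipelineSize() { return 1; } // only one live datanode remains
                public void close() {}
                public String path() { return "/WALs/.../new-wal"; }
            };
            WalWriter active = maybeRoll(degraded, factory, 2);
            System.out.println("now writing to " + active.path());
        }
    }

Note that in the log the replacement WAL also ends up on a single-replica pipeline ([127.0.0.1:33009]), since only one datanode is still accepting connections, so the same warning recurs shortly afterwards.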
2024-11-16T05:48:05,178 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57166 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741873_1056 to mirror 127.0.0.1:38437 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:05,179 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:05,179 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741873_1056 2024-11-16T05:48:05,179 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57166 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T05:48:05,179 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57166 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57166 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:05,180 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:05,181 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:05,181 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:05,181 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741874_1057 2024-11-16T05:48:05,182 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:05,183 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:05,183 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 2024-11-16T05:48:05,183 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741875_1058 2024-11-16T05:48:05,184 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:05,185 WARN [IPC Server handler 1 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:05,185 WARN [IPC Server handler 1 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:05,185 WARN [IPC Server handler 1 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:05,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:05,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:05,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:05,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:05,188 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:05,188 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736083147 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736085167 2024-11-16T05:48:05,189 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33009:33009)] 2024-11-16T05:48:05,189 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time 2024-11-16T05:48:05,189 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736083147 is not closed yet, will try archiving it next time 2024-11-16T05:48:05,189 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736079127 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs/3456ee6a3164%2C46863%2C1731736063466.1731736079127 2024-11-16T05:48:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741856_1039 (size=13591) 2024-11-16T05:48:05,559 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:05,592 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 is not closed yet, will try archiving it next time 2024-11-16T05:48:05,775 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3ddb7113[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741841_1024 to 127.0.0.1:32791 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:05,775 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6664ae27[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741861_1044 to 127.0.0.1:37035 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46863 {}] regionserver.HRegion(8855): Flush requested on d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:48:06,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T05:48:06,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/f870aaf106b74d9699efd6680064f6c6 is 1079, key is tmprow/info:/1731736086097/Put/seqid=0 2024-11-16T05:48:06,107 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
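The flush records above carry two different sizes for the same memstore: dataSize (the key/value payload, ~7.35 KB) and heapSize (payload plus per-cell bookkeeping overhead, ~8.13 KB); when the accounting crosses a limit a flush is requested and the contents are written out as a small new HFile (the previous flush produced a 5.9 K file for a single row). A minimal sketch of that dual accounting follows; the 64-byte overhead constant, the threshold, and the class name are assumptions, not HBase's actual values.

    import java.util.concurrent.ConcurrentSkipListMap;

    // Toy memstore with the kind of dual size accounting visible in the flush
    // lines above: dataSize tracks key+value bytes, heapSize adds a fixed
    // per-cell overhead. The 64-byte overhead and flush threshold are invented.
    public class MemStoreSketch {

        private static final long PER_CELL_OVERHEAD = 64;           // assumed, not HBase's constant
        private final ConcurrentSkipListMap<String, byte[]> cells = new ConcurrentSkipListMap<>();
        private long dataSize;
        private long heapSize;

        void put(String key, byte[] value) {
            cells.put(key, value);
            long cellBytes = key.length() + value.length;
            dataSize += cellBytes;
            heapSize += cellBytes + PER_CELL_OVERHEAD;
        }

        boolean shouldFlush(long flushThresholdBytes) {
            return heapSize >= flushThresholdBytes;
        }

        /** Pretend-flush: report what would be written, then reset the accounting. */
        void flush() {
            System.out.printf("Flushing %d cells, dataSize=%.2f KB heapSize=%.2f KB%n",
                cells.size(), dataSize / 1024.0, heapSize / 1024.0);
            cells.clear();
            dataSize = 0;
            heapSize = 0;
        }

        public static void main(String[] args) {
            MemStoreSketch ms = new MemStoreSketch();
            ms.put("tmprow/info:/1731736086097", new byte[1024]);   // one ~1 KB cell, like the log's 1079-byte cell
            if (ms.shouldFlush(8 * 1024)) {
                ms.flush();
            } else {
                System.out.println("below threshold, keep buffering");
            }
        }
    }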
2024-11-16T05:48:06,107 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:06,107 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741877_1060 2024-11-16T05:48:06,108 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:06,110 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:06,110 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:06,110 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741878_1061 2024-11-16T05:48:06,111 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:06,113 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:06,113 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:06,113 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741879_1062 2024-11-16T05:48:06,114 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:06,115 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:06,115 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK], DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 
2024-11-16T05:48:06,115 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741880_1063 2024-11-16T05:48:06,116 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:06,117 WARN [IPC Server handler 4 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:06,117 WARN [IPC Server handler 4 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:06,117 WARN [IPC Server handler 4 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:06,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741881_1064 (size=6027) 2024-11-16T05:48:06,491 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
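At this point every pipeline candidate has been tried: the log-roller thread gets "All datanodes [...] are bad. Aborting..." from DataStreamer.handleBadDatanode, and the NameNode cannot place the second replica at all. The client-side behaviour on a pipeline failure is governed by the replace-datanode-on-failure settings; the fragment below sketches those knobs with illustrative values and is not the configuration this test actually uses.

    // Sketch with illustrative values (org.apache.hadoop.conf.Configuration); not the test's setup.
    Configuration conf = new Configuration();
    // Ask for a replacement datanode when a node in the write pipeline fails (on by default).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only requests a replacement for larger pipelines; ALWAYS and NEVER are the other options.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // With best-effort enabled, the writer keeps going on the surviving nodes instead of aborting
    // the stream with "All datanodes ... are bad" when no replacement can be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);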
2024-11-16T05:48:06,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/f870aaf106b74d9699efd6680064f6c6 2024-11-16T05:48:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/f870aaf106b74d9699efd6680064f6c6 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/f870aaf106b74d9699efd6680064f6c6 2024-11-16T05:48:06,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/f870aaf106b74d9699efd6680064f6c6, entries=1, sequenceid=55, filesize=5.9 K 2024-11-16T05:48:06,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 445ms, sequenceid=55, compaction requested=true 2024-11-16T05:48:06,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:06,543 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-16T05:48:06,543 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:06,544 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 because midkey is the same as first or last row 2024-11-16T05:48:06,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d0ad76cb64ef7ce60d3f54e1e7efc517:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:48:06,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:48:06,544 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:48:06,545 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:48:06,545 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HStore(1541): d0ad76cb64ef7ce60d3f54e1e7efc517/info is initiating minor compaction (all files) 2024-11-16T05:48:06,545 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
d0ad76cb64ef7ce60d3f54e1e7efc517/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:48:06,545 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/af217228052c4f3ba40e1f10b3daf66f, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/f870aaf106b74d9699efd6680064f6c6] into tmpdir=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp, totalSize=29.3 K 2024-11-16T05:48:06,546 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.Compactor(225): Compacting c51c492df92b4822bad124bc007dcad7, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731736077137 2024-11-16T05:48:06,546 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.Compactor(225): Compacting af217228052c4f3ba40e1f10b3daf66f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731736084663 2024-11-16T05:48:06,546 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] compactions.Compactor(225): Compacting f870aaf106b74d9699efd6680064f6c6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731736086097 2024-11-16T05:48:06,560 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d0ad76cb64ef7ce60d3f54e1e7efc517#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:48:06,561 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/8be8c7e93f3f4a9b86abc628ace783ee is 1080, key is row0002/info:/1731736077137/Put/seqid=0 2024-11-16T05:48:06,563 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40457 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
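The flush has pushed the store to three HFiles, so ExploringCompactionPolicy selects all three (about 29 K) for a minor compaction, throttled by PressureAwareThroughputController. The selection is driven by a handful of store-level settings; the fragment below lists them with what I believe are the stock defaults, purely as a reference rather than values set by this test.

    // Reference sketch (org.apache.hadoop.hbase.HBaseConfiguration); defaults as I understand them.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum number of files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files selected in one compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring selection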
2024-11-16T05:48:06,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57190 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741882_1065 to mirror 127.0.0.1:40457 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:06,563 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]) is bad. 2024-11-16T05:48:06,563 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741882_1065 2024-11-16T05:48:06,564 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57190 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:06,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57190 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57190 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:06,564 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK] 2024-11-16T05:48:06,565 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:06,566 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:06,566 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741883_1066 2024-11-16T05:48:06,566 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:06,568 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37035 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:48:06,568 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57202 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741884_1067 to mirror 127.0.0.1:37035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:06,569 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]) is bad. 2024-11-16T05:48:06,569 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57202 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:06,569 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741884_1067 2024-11-16T05:48:06,569 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57202 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57202 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:06,569 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37035,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK] 2024-11-16T05:48:06,572 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:06,572 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57204 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741885_1068 to mirror 127.0.0.1:32791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:06,572 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:06,572 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741885_1068 2024-11-16T05:48:06,572 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57204 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:06,572 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:57204 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57204 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:06,573 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:06,574 WARN [IPC Server handler 1 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T05:48:06,574 WARN [IPC Server handler 1 on default port 36821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T05:48:06,574 WARN [IPC Server handler 1 on default port 36821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T05:48:06,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741886_1069 (size=18097) 2024-11-16T05:48:06,988 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/8be8c7e93f3f4a9b86abc628ace783ee as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/8be8c7e93f3f4a9b86abc628ace783ee 2024-11-16T05:48:06,996 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d0ad76cb64ef7ce60d3f54e1e7efc517/info of d0ad76cb64ef7ce60d3f54e1e7efc517 into 8be8c7e93f3f4a9b86abc628ace783ee(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
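The IPC-server warning above ends with a hint to enable DEBUG on BlockPlacementPolicy and NetworkTopology to see why only one of the two required replicas could be placed. Since the test already runs Log4j 2, one way to follow that hint is the programmatic Configurator call sketched below (adding the same two loggers to the log4j2 properties file would have the same effect); this is a sketch, not something the test does.

    // Raise the two loggers named in the warning to DEBUG via the Log4j 2 API.
    org.apache.logging.log4j.core.config.Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy",
        org.apache.logging.log4j.Level.DEBUG);
    org.apache.logging.log4j.core.config.Configurator.setLevel(
        "org.apache.hadoop.net.NetworkTopology",
        org.apache.logging.log4j.Level.DEBUG);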
2024-11-16T05:48:06,996 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:06,996 INFO [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517., storeName=d0ad76cb64ef7ce60d3f54e1e7efc517/info, priority=13, startTime=1731736086544; duration=0sec 2024-11-16T05:48:06,996 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T05:48:06,996 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/8be8c7e93f3f4a9b86abc628ace783ee because midkey is the same as first or last row 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/8be8c7e93f3f4a9b86abc628ace783ee because midkey is the same as first or last row 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/8be8c7e93f3f4a9b86abc628ace783ee because midkey is the same as first or last row 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:48:06,997 DEBUG [RS:0;3456ee6a3164:46863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d0ad76cb64ef7ce60d3f54e1e7efc517:info 2024-11-16T05:48:07,189 WARN [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
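FSHLog is now refusing to keep rolling: every roll lands on the same under-replicated pipeline, so after several consecutive requests it emits the "Too many consecutive RollWriter requests" warning instead. To the best of my knowledge the thresholds involved are the two FSHLog settings sketched below; treat the exact key names and values as assumptions to verify against the HBase version in use.

    // Assumed FSHLog knobs (verify the key names for your HBase version); illustrative values only.
    Configuration conf = HBaseConfiguration.create();
    // Replication count below which an append triggers a log-roll request.
    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    // How many consecutive low-replication rolls are attempted before FSHLog stops requesting more.
    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);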
2024-11-16T05:48:07,190 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:07,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:07,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:07,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:07,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:07,343 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:48:07,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6287ee78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:07,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f9c830c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:07,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7085983e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/java.io.tmpdir/jetty-localhost-43667-hadoop-hdfs-3_4_1-tests_jar-_-any-16046848210415090913/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:07,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5dbb3308{HTTP/1.1, (http/1.1)}{localhost:43667} 2024-11-16T05:48:07,436 INFO [Time-limited test {}] server.Server(415): Started @129135ms 2024-11-16T05:48:07,437 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:48:07,511 WARN [Thread-992 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:48:07,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc42fc8688bce944 with lease ID 0xbb7795547e792d7c: from storage DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163 node DatanodeRegistration(127.0.0.1:40791, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=46513, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T05:48:07,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc42fc8688bce944 with lease ID 0xbb7795547e792d7c: from storage DS-e8eebe27-dffb-4fe4-93a5-398123953f20 node DatanodeRegistration(127.0.0.1:40791, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=46513, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:07,560 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:07,774 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3ddb7113[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741871_1054 to 127.0.0.1:32791 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:07,774 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6664ae27[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741866_1049 to 127.0.0.1:40457 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:08,492 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:08,773 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6664ae27[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741856_1039 to 127.0.0.1:38437 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:08,773 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3ddb7113[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741881_1064 to 127.0.0.1:40457 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:09,190 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:09,560 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:10,493 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:10,776 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6664ae27[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33595, datanodeUuid=6c6b2bbe-587c-4d27-820d-03bc9d6cf66c, infoPort=33009, infoSecurePort=0, ipcPort=44213, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741886_1069 to 127.0.0.1:38437 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:11,191 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:11,561 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:12,494 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,191 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,402 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T05:48:13,561 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,662 ERROR [FSHLog-0-hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData-prefix:3456ee6a3164,44501,1731736063421 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:48:13,662 WARN [FSHLog-0-hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData-prefix:3456ee6a3164,44501,1731736063421 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,663 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C44501%2C1731736063421:(num 1731736063577) roll requested 2024-11-16T05:48:13,663 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C44501%2C1731736063421.1731736093663 2024-11-16T05:48:13,667 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,667 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK], DatanodeInfoWithStorage[127.0.0.1:40791,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK]) is bad. 2024-11-16T05:48:13,667 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741887_1070 2024-11-16T05:48:13,668 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32791,DS-f0d460ad-4dd6-446a-807e-7b20786e1c6b,DISK] 2024-11-16T05:48:13,671 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38437 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602501559_22 at /127.0.0.1:41242 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data4]'}, localName='127.0.0.1:40791', datanodeUuid='aadbfb3b-707a-4c20-b5d9-9812720a1509', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741888_1071 to mirror 127.0.0.1:38437 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:13,672 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40791,DS-d7302f58-4b9d-4f59-b7d4-7acf7d0bd163,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:13,672 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741888_1071 2024-11-16T05:48:13,672 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-602501559_22 at /127.0.0.1:41242 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
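A few entries below, Close-WAL-Writer-0 gives up on closing the old master WAL cleanly and hands it to RecoverLeaseFSUtils, which asks the NameNode to recover the file's lease (the "Lease recovery is in progress. RecoveryId = 1073" reply). The fragment below sketches the underlying HDFS client calls, with the old WAL path copied from the log; the real utility adds retry and timeout handling, so this is an illustration rather than the HBase implementation.

    // Sketch of lease recovery against the old master WAL (path copied from the log).
    // Assumption: fs was obtained from hdfs://localhost:36821 as a DistributedFileSystem.
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path oldWal = new Path("/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/"
        + "3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577");
    boolean recovered = dfs.recoverLease(oldWal);        // ask the NameNode to reclaim the dead writer's lease
    while (!recovered && !dfs.isFileClosed(oldWal)) {    // poll until the last block is finalized and the file closes
      Thread.sleep(1000);
      recovered = dfs.recoverLease(oldWal);
    }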
2024-11-16T05:48:13,672 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602501559_22 at /127.0.0.1:41242 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:40791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41242 dst: /127.0.0.1:40791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:13,673 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:13,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:13,678 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:13,678 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:13,678 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:13,679 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:13,679 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736093663 2024-11-16T05:48:13,679 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,679 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:13,679 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 2024-11-16T05:48:13,680 WARN [IPC Server handler 0 on default port 36821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-16T05:48:13,680 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 after 1ms 2024-11-16T05:48:13,681 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33009:33009),(127.0.0.1/127.0.0.1:46513:46513)] 2024-11-16T05:48:13,681 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 is not closed yet, will try archiving it next time 2024-11-16T05:48:14,494 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:15,192 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:16,495 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:17,193 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:17,531 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@72ed8f7b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-721148889-172.17.0.2-1731736062832:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40457,null,null]) java.net.ConnectException: Call From 3456ee6a3164/172.17.0.2 to localhost:35993 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T05:48:17,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741833_1020 (size=455) 2024-11-16T05:48:17,683 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 after 4004ms 2024-11-16T05:48:18,166 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736063883 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs/3456ee6a3164%2C46863%2C1731736063466.1731736063883 2024-11-16T05:48:18,169 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736083147 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs/3456ee6a3164%2C46863%2C1731736063466.1731736083147 2024-11-16T05:48:18,495 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:19,193 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:19,519 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@12c27034[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40791, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=46513, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741835_1011 to 127.0.0.1:38437 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:19,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741833_1020 (size=455) 2024-11-16T05:48:20,496 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:20,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:48:20,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:48:20,955 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.1731736100954 2024-11-16T05:48:20,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:20,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:20,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:20,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:20,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:20,967 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736085167 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736100954 2024-11-16T05:48:20,968 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46513:46513),(127.0.0.1/127.0.0.1:33009:33009)] 2024-11-16T05:48:20,968 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.1731736085167 is 
not closed yet, will try archiving it next time 2024-11-16T05:48:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741876_1059 (size=12911) 2024-11-16T05:48:20,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46863 {}] regionserver.HRegion(8855): Flush requested on d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:48:20,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T05:48:20,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/88269e0afc9f40f3b0229bf9a1c36528 is 1080, key is row0013/info:/1731736100970/Put/seqid=0 2024-11-16T05:48:20,982 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38437 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:20,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:54992 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6]'}, localName='127.0.0.1:33595', datanodeUuid='6c6b2bbe-587c-4d27-820d-03bc9d6cf66c', xmitsInProgress=0}:Exception transferring block BP-721148889-172.17.0.2-1731736062832:blk_1073741891_1075 to mirror 127.0.0.1:38437 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:20,982 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-721148889-172.17.0.2-1731736062832:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33595,DS-b6fc0778-deca-4ea8-8017-355e28735fb0,DISK], DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK]) is bad. 2024-11-16T05:48:20,982 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-721148889-172.17.0.2-1731736062832:blk_1073741891_1075 2024-11-16T05:48:20,982 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:54992 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T05:48:20,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1676914167_22 at /127.0.0.1:54992 [Receiving block BP-721148889-172.17.0.2-1731736062832:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:33595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54992 dst: /127.0.0.1:33595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:20,983 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38437,DS-9d15687a-5ea1-4f1f-a8f5-8c10b1107ac9,DISK] 2024-11-16T05:48:20,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741892_1076 (size=8190) 2024-11-16T05:48:20,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741892_1076 (size=8190) 2024-11-16T05:48:21,194 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-16T05:48:21,194 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:48:21,196 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:48:21,197 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:21,197 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:21,198 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:21,198 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T05:48:21,198 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:48:21,198 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1058747879, stopped=false 2024-11-16T05:48:21,199 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,44501,1731736063421 2024-11-16T05:48:21,202 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:21,202 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:21,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:21,202 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:21,202 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:21,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:21,202 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:48:21,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
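The call stack captured above comes from AbstractTestLogRolling.tearDown closing the shared async connection and shutting the mini cluster down via HBaseTestingUtil.shutdownMiniCluster; the ZooKeeper NodeDeleted events on /hbase/running immediately after it are the master broadcasting that shutdown to the region servers. A minimal sketch of that setup/teardown shape is below. Only the class name and shutdownMiniCluster() are taken from the stack trace itself; the no-arg constructor and startMiniCluster() are assumed to match the utility's long-standing API, and the rest is illustrative.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    // Sketch of the JUnit lifecycle behind the tearDown stack trace above.
    // startMiniCluster()/constructor are assumptions; shutdownMiniCluster()
    // is the call visible in the trace.
    public class LogRollingTeardownSketch {

        private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            // Brings up an in-process HDFS + ZooKeeper + HBase cluster for the test.
            testUtil.startMiniCluster();
        }

        @After
        public void tearDown() throws Exception {
            // Closes the shared async connection, asks the master to shut down
            // (which deletes /hbase/running, as the ZK events above show) and
            // then stops the mini DFS and ZooKeeper processes.
            testUtil.shutdownMiniCluster();
        }
    }

Because the teardown runs while one datanode is still dead, the WAL close and lease-recovery warnings that follow are expected noise for this test rather than assertion failures.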
2024-11-16T05:48:21,202 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:21,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:21,203 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:21,203 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:21,203 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,46863,1731736063466' ***** 2024-11-16T05:48:21,203 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:21,203 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:48:21,203 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,41701,1731736064423' ***** 2024-11-16T05:48:21,203 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:48:21,204 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:48:21,204 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:48:21,204 INFO [RS:1;3456ee6a3164:41701 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:48:21,204 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:48:21,204 INFO [RS:1;3456ee6a3164:41701 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:48:21,204 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,41701,1731736064423 2024-11-16T05:48:21,204 INFO [RS:1;3456ee6a3164:41701 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:48:21,204 INFO [RS:1;3456ee6a3164:41701 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3456ee6a3164:41701. 
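The ZKWatcher/ZKUtil DEBUG lines above show how the shutdown signal reaches each process: every server keeps a watch on /hbase/running, treats a NodeDeleted event on that path as "cluster is stopping", and immediately re-arms the watch even though the znode no longer exists ("Set watcher on znode that does not yet exist"). Below is a minimal stand-alone model of that pattern using the plain Apache ZooKeeper client; it is not HBase's ZKWatcher, and aside from the quorum address and znode path quoted from the log, everything in it is illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Minimal model of the "/hbase/running" shutdown signal seen in the
    // ZKWatcher lines above. Plain ZooKeeper client, not HBase's ZKWatcher.
    public class RunningNodeWatcher implements Watcher {

        private final CountDownLatch clusterStopped = new CountDownLatch(1);
        private ZooKeeper zk;

        void start() throws Exception {
            // Quorum address taken from the log lines; timeout is a placeholder.
            zk = new ZooKeeper("127.0.0.1:50879", 30_000, this);
            // exists() both checks the node and arms a watch, even when the
            // node is absent - the same idea as the DEBUG lines above.
            zk.exists("/hbase/running", true);
        }

        @Override
        public void process(WatchedEvent event) {
            if ("/hbase/running".equals(event.getPath())
                    && event.getType() == Watcher.Event.EventType.NodeDeleted) {
                // Master deleted the node: begin an orderly shutdown.
                clusterStopped.countDown();
            }
            try {
                if (zk != null) {
                    // ZooKeeper watches are one-shot; re-arm after every event.
                    zk.exists("/hbase/running", true);
                }
            } catch (Exception ignored) {
                // Session is going away during shutdown; nothing left to re-arm.
            }
        }

        void awaitShutdownSignal() throws InterruptedException {
            clusterStopped.await();
        }
    }

The one-shot nature of ZooKeeper watches is why the log shows the watcher being re-registered right after the NodeDeleted event rather than only once at startup.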
2024-11-16T05:48:21,204 DEBUG [RS:1;3456ee6a3164:41701 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:21,204 DEBUG [RS:1;3456ee6a3164:41701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:21,205 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,41701,1731736064423; all regions closed. 2024-11-16T05:48:21,205 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,205 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,205 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,205 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,206 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,206 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,206 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 2024-11-16T05:48:21,207 WARN [IPC Server handler 3 on default port 36821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1015 2024-11-16T05:48:21,207 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 after 1ms 2024-11-16T05:48:21,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/88269e0afc9f40f3b0229bf9a1c36528 2024-11-16T05:48:21,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/88269e0afc9f40f3b0229bf9a1c36528 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/88269e0afc9f40f3b0229bf9a1c36528 2024-11-16T05:48:21,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/88269e0afc9f40f3b0229bf9a1c36528, entries=3, sequenceid=66, filesize=8.0 K 2024-11-16T05:48:21,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 446ms, sequenceid=66, compaction requested=false 2024-11-16T05:48:21,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d0ad76cb64ef7ce60d3f54e1e7efc517: 2024-11-16T05:48:21,420 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-16T05:48:21,420 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:48:21,420 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/8be8c7e93f3f4a9b86abc628ace783ee because midkey is the same as first or last row 2024-11-16T05:48:21,420 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:48:21,420 INFO [RS:0;3456ee6a3164:46863 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:48:21,420 INFO [RS:0;3456ee6a3164:46863 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:48:21,420 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(3091): Received CLOSE for d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:48:21,420 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,46863,1731736063466 2024-11-16T05:48:21,420 INFO [RS:0;3456ee6a3164:46863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:48:21,420 INFO [RS:0;3456ee6a3164:46863 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:46863. 2024-11-16T05:48:21,421 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d0ad76cb64ef7ce60d3f54e1e7efc517, disabling compactions & flushes 2024-11-16T05:48:21,421 DEBUG [RS:0;3456ee6a3164:46863 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:21,421 DEBUG [RS:0;3456ee6a3164:46863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:21,421 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:48:21,421 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 
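The split-policy DEBUG lines just above (after the 7.35 KB flush) record a two-stage decision: the size check passes because the store's total size of 25.7 K exceeds the 16.0 K threshold in effect for this test, but the split is then vetoed because the candidate midkey equals the store file's first or last row, so cutting there would leave one empty daughter region. The sketch below models that two-stage check; the sizes are the ones quoted in the log, while the class, method names, and row keys are hypothetical and do not reproduce HBase's actual split-policy API.

    import java.util.Optional;

    // Illustrative model of the two-stage split decision recorded in the
    // DEBUG lines above: a size check followed by a midkey sanity check.
    // Only the two sizes are taken from the log; everything else is made up.
    public class SplitDecisionModel {

        /** Stage 1: mirrors "sumSize=25.7 K, sizeToCheck=16.0 K". */
        static boolean sizeSaysSplit(long sumSizeBytes, long sizeToCheckBytes) {
            return sumSizeBytes > sizeToCheckBytes;
        }

        /**
         * Stage 2: mirrors "cannot split ... because midkey is the same as
         * first or last row" - such a midkey would give an empty daughter,
         * so no usable split point exists.
         */
        static Optional<String> chooseSplitPoint(String firstRow, String midKey, String lastRow) {
            if (midKey.equals(firstRow) || midKey.equals(lastRow)) {
                return Optional.empty();
            }
            return Optional.of(midKey);
        }

        public static void main(String[] args) {
            long sumSize = 25_700;      // ~25.7 KB store size from the log
            long sizeToCheck = 16_000;  // ~16.0 KB threshold from the log

            boolean wantSplit = sizeSaysSplit(sumSize, sizeToCheck);
            // Hypothetical keys: in HBase the midkey comes from the HFile index.
            Optional<String> splitPoint = chooseSplitPoint("row0001", "row0001", "row0020");

            System.out.println("size check says split: " + wantSplit);
            System.out.println("usable split point: " + splitPoint.orElse("<none - skip split>"));
        }
    }

That second stage is what produces the "cannot split" line here even though the size check had already answered "should split".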
2024-11-16T05:48:21,421 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. after waiting 0 ms 2024-11-16T05:48:21,421 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T05:48:21,421 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:48:21,421 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:48:21,421 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T05:48:21,421 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:48:21,421 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d0ad76cb64ef7ce60d3f54e1e7efc517 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-16T05:48:21,422 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T05:48:21,422 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1325): Online Regions={d0ad76cb64ef7ce60d3f54e1e7efc517=TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T05:48:21,422 DEBUG [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d0ad76cb64ef7ce60d3f54e1e7efc517 2024-11-16T05:48:21,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:48:21,422 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:48:21,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:48:21,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:48:21,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:48:21,422 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-16T05:48:21,423 ERROR [FSHLog-0-hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7-prefix:3456ee6a3164,46863,1731736063466.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,423 WARN [FSHLog-0-hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7-prefix:3456ee6a3164,46863,1731736063466.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,423 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C46863%2C1731736063466.meta:.meta(num 1731736064298) roll requested 2024-11-16T05:48:21,423 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46863%2C1731736063466.meta.1731736101423.meta 2024-11-16T05:48:21,426 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/458ff32a92bc44309e452715411d393c is 1080, key is row0015/info:/1731736100974/Put/seqid=0 2024-11-16T05:48:21,428 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,428 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,428 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,429 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,429 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,429 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736101423.meta 2024-11-16T05:48:21,430 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,430 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40457,DS-d44ada13-9000-43de-afa5-1afda4a2b5ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:21,430 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta 2024-11-16T05:48:21,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741894_1079 (size=14660) 2024-11-16T05:48:21,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741894_1079 (size=14660) 2024-11-16T05:48:21,431 WARN [IPC Server handler 0 on default port 36821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1080 for block blk_1073741834_1010 2024-11-16T05:48:21,431 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46513:46513),(127.0.0.1/127.0.0.1:33009:33009)] 2024-11-16T05:48:21,431 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta is not closed yet, will try archiving it next time 2024-11-16T05:48:21,431 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/458ff32a92bc44309e452715411d393c 2024-11-16T05:48:21,431 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta after 1ms 2024-11-16T05:48:21,437 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/.tmp/info/458ff32a92bc44309e452715411d393c as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/458ff32a92bc44309e452715411d393c 2024-11-16T05:48:21,443 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/458ff32a92bc44309e452715411d393c, entries=9, sequenceid=78, filesize=14.3 K 2024-11-16T05:48:21,444 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 23ms, sequenceid=78, compaction requested=true 2024-11-16T05:48:21,445 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/b8d9f647a7614b8197d4fb956968ceea, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7, 
hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/4b11b97ab2a040ed91f3dab14a4d0769, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/af217228052c4f3ba40e1f10b3daf66f, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/f870aaf106b74d9699efd6680064f6c6] to archive 2024-11-16T05:48:21,446 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T05:48:21,448 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/b8d9f647a7614b8197d4fb956968ceea to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/b8d9f647a7614b8197d4fb956968ceea 2024-11-16T05:48:21,449 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/info/9abd7d09d93d4a46a0c6e0cda9771291 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517./info:regioninfo/1731736064895/Put/seqid=0 2024-11-16T05:48:21,449 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/7086daede5af40a7bb270498dfe05f04 2024-11-16T05:48:21,450 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/c51c492df92b4822bad124bc007dcad7 2024-11-16T05:48:21,452 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/4b11b97ab2a040ed91f3dab14a4d0769 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/4b11b97ab2a040ed91f3dab14a4d0769 2024-11-16T05:48:21,453 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/af217228052c4f3ba40e1f10b3daf66f to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/af217228052c4f3ba40e1f10b3daf66f 2024-11-16T05:48:21,455 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/f870aaf106b74d9699efd6680064f6c6 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/info/f870aaf106b74d9699efd6680064f6c6 2024-11-16T05:48:21,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741895_1081 (size=7089) 2024-11-16T05:48:21,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741895_1081 (size=7089) 2024-11-16T05:48:21,455 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3456ee6a3164:44501 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-16T05:48:21,456 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b8d9f647a7614b8197d4fb956968ceea=10347, 7086daede5af40a7bb270498dfe05f04=12506, c51c492df92b4822bad124bc007dcad7=17994, 4b11b97ab2a040ed91f3dab14a4d0769=6027, af217228052c4f3ba40e1f10b3daf66f=6027, f870aaf106b74d9699efd6680064f6c6=6027] 2024-11-16T05:48:21,456 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/info/9abd7d09d93d4a46a0c6e0cda9771291 2024-11-16T05:48:21,460 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d0ad76cb64ef7ce60d3f54e1e7efc517/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-16T05:48:21,461 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 2024-11-16T05:48:21,461 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d0ad76cb64ef7ce60d3f54e1e7efc517: Waiting for close lock at 1731736101420Running coprocessor pre-close hooks at 1731736101420Disabling compacts and flushes for region at 1731736101420Disabling writes for close at 1731736101421 (+1 ms)Obtaining lock to block concurrent updates at 1731736101421Preparing flush snapshotting stores in d0ad76cb64ef7ce60d3f54e1e7efc517 at 1731736101421Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731736101421Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. at 1731736101422 (+1 ms)Flushing d0ad76cb64ef7ce60d3f54e1e7efc517/info: creating writer at 1731736101422Flushing d0ad76cb64ef7ce60d3f54e1e7efc517/info: appending metadata at 1731736101425 (+3 ms)Flushing d0ad76cb64ef7ce60d3f54e1e7efc517/info: closing flushed file at 1731736101425Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1442d547: reopening flushed file at 1731736101436 (+11 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for d0ad76cb64ef7ce60d3f54e1e7efc517 in 23ms, sequenceid=78, compaction requested=true at 1731736101444 (+8 ms)Writing region close event to WAL at 1731736101456 (+12 ms)Running coprocessor post-close hooks at 1731736101461 (+5 ms)Closed at 1731736101461 2024-11-16T05:48:21,461 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731736064522.d0ad76cb64ef7ce60d3f54e1e7efc517. 
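Note on the repeated "All datanodes [...] are bad. Aborting..." traces above: they come from the HDFS output-stream pipeline giving up once no healthy replacement datanode is left for the WAL block it is writing. The sketch below only illustrates the standard client-side settings that govern that pipeline-recovery behavior; the keys are stock HDFS client configuration, but the values, URI reuse, and file path are arbitrary examples and are not taken from this test's actual configuration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: the standard HDFS client knobs that decide whether a
// write pipeline tries to replace a failed datanode or aborts with the
// "All datanodes [...] are bad" error seen in the traces above.
public class PipelinePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Try to replace failed datanodes instead of failing the stream outright;
    // on tiny test pipelines this policy is often relaxed or disabled.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

    // "hdfs://localhost:36821" mirrors the NameNode address in this log; any
    // reachable HDFS URI behaves the same way. The path is a placeholder.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36821"), conf);
         FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-policy-demo"))) {
      out.writeBytes("demo");
      out.hflush(); // pipeline recovery, if any, happens underneath calls like this
    }
  }
}

With best-effort enabled, a stream whose replacement attempt fails keeps writing to the surviving replicas rather than throwing; once every replica in the pipeline is bad, it still aborts, which is the situation these traces record.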
2024-11-16T05:48:21,476 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/ns/b26a4d3b37c24956b79d8bc880ac625e is 43, key is default/ns:d/1731736064362/Put/seqid=0 2024-11-16T05:48:21,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741896_1082 (size=5153) 2024-11-16T05:48:21,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741896_1082 (size=5153) 2024-11-16T05:48:21,482 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/ns/b26a4d3b37c24956b79d8bc880ac625e 2024-11-16T05:48:21,504 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/table/d122eabc05654bf7987ab8cfb84c2f90 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731736064910/Put/seqid=0 2024-11-16T05:48:21,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741897_1083 (size=5424) 2024-11-16T05:48:21,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741897_1083 (size=5424) 2024-11-16T05:48:21,509 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/table/d122eabc05654bf7987ab8cfb84c2f90 2024-11-16T05:48:21,517 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/info/9abd7d09d93d4a46a0c6e0cda9771291 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/info/9abd7d09d93d4a46a0c6e0cda9771291 2024-11-16T05:48:21,524 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/info/9abd7d09d93d4a46a0c6e0cda9771291, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T05:48:21,525 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/ns/b26a4d3b37c24956b79d8bc880ac625e as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/ns/b26a4d3b37c24956b79d8bc880ac625e 2024-11-16T05:48:21,534 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/ns/b26a4d3b37c24956b79d8bc880ac625e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T05:48:21,536 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/.tmp/table/d122eabc05654bf7987ab8cfb84c2f90 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/table/d122eabc05654bf7987ab8cfb84c2f90 2024-11-16T05:48:21,539 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T05:48:21,539 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T05:48:21,544 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/table/d122eabc05654bf7987ab8cfb84c2f90, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T05:48:21,545 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 123ms, sequenceid=11, compaction requested=false 2024-11-16T05:48:21,551 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T05:48:21,551 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:48:21,552 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:48:21,552 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736101422Running coprocessor pre-close hooks at 1731736101422Disabling compacts and flushes for region at 1731736101422Disabling writes for close at 1731736101422Obtaining lock to block concurrent updates at 1731736101422Preparing flush snapshotting stores in 1588230740 at 1731736101422Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731736101423 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731736101431 (+8 ms)Flushing 1588230740/info: creating writer at 1731736101431Flushing 1588230740/info: appending metadata at 1731736101448 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731736101448Flushing 1588230740/ns: creating writer at 1731736101462 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731736101475 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731736101475Flushing 1588230740/table: creating writer at 1731736101488 (+13 ms)Flushing 1588230740/table: appending metadata 
at 1731736101503 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731736101504 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e9252bc: reopening flushed file at 1731736101516 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fedeceb: reopening flushed file at 1731736101524 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@241df6e6: reopening flushed file at 1731736101534 (+10 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 123ms, sequenceid=11, compaction requested=false at 1731736101545 (+11 ms)Writing region close event to WAL at 1731736101547 (+2 ms)Running coprocessor post-close hooks at 1731736101551 (+4 ms)Closed at 1731736101552 (+1 ms) 2024-11-16T05:48:21,552 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:48:21,622 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,46863,1731736063466; all regions closed. 2024-11-16T05:48:21,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:21,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741893_1078 (size=825) 2024-11-16T05:48:21,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741893_1078 (size=825) 2024-11-16T05:48:21,739 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:48:21,764 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T05:48:21,764 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T05:48:22,493 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:48:22,519 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ccaa081[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40791, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=46513, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741836_1012 to 127.0.0.1:38437 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:22,519 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@12c27034[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40791, datanodeUuid=aadbfb3b-707a-4c20-b5d9-9812720a1509, infoPort=46513, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=161557260;c=1731736062832):Failed to transfer BP-721148889-172.17.0.2-1731736062832:blk_1073741832_1008 to 127.0.0.1:38437 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:22,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741876_1059 (size=12911) 2024-11-16T05:48:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:48:23,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:48:24,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T05:48:24,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:48:24,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T05:48:24,378 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T05:48:24,379 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-16T05:48:25,209 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 after 4003ms 2024-11-16T05:48:25,433 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta after 4003ms 2024-11-16T05:48:25,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:48:25,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:48:26,206 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T05:48:26,209 DEBUG [RS:1;3456ee6a3164:41701 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs 2024-11-16T05:48:26,209 INFO [RS:1;3456ee6a3164:41701 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C41701%2C1731736064423:(num 1731736064627) 2024-11-16T05:48:26,209 DEBUG [RS:1;3456ee6a3164:41701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:26,209 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:48:26,210 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
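The RecoverLeaseFSUtils lines above ("Recover lease on dfs file ...", then "Failed to recover lease, attempt=0 ... after 1ms" and "attempt=1 ... after 4003ms") show the WAL close path asking the NameNode to recover the lease on the old writer's file and then polling until HDFS reports it closed. Below is a minimal sketch of that recover-then-poll pattern against the public DistributedFileSystem API; it is not HBase's implementation (which, as the later reflection stack trace shows, reaches isFileClosed through a wrapper), and the retry count, back-off, URI, and path are placeholder assumptions.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal sketch of the recover-then-poll pattern described by the
// RecoverLeaseFSUtils messages above. Not HBase's implementation; it only
// shows the underlying HDFS calls (recoverLease / isFileClosed) involved.
public class LeaseRecoverySketch {
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    // Ask the NameNode to start lease recovery; a true return means the file
    // is already closed and nothing more needs to happen.
    if (dfs.recoverLease(wal)) {
      return true;
    }
    // Poll until the block recovery triggered above completes. The fixed
    // back-off here mirrors the ~4s gap between attempt=0 and attempt=1 in
    // the log; the attempt limit is an arbitrary example.
    for (int attempt = 0; attempt < 5; attempt++) {
      Thread.sleep(4000L);
      if (dfs.isFileClosed(wal)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    // The URI mirrors the NameNode address in this log; the WAL path is a placeholder.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36821"), new Configuration())) {
      if (fs instanceof DistributedFileSystem) {
        recoverLease((DistributedFileSystem) fs, new Path("/path/to/old-wal"));
      }
    }
  }
}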
2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:48:26,210 INFO [RS:1;3456ee6a3164:41701 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41701 2024-11-16T05:48:26,213 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,41701,1731736064423 2024-11-16T05:48:26,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:48:26,213 INFO [RS:1;3456ee6a3164:41701 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:48:26,215 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,41701,1731736064423] 2024-11-16T05:48:26,217 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,41701,1731736064423 already deleted, retry=false 2024-11-16T05:48:26,217 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,41701,1731736064423 expired; onlineServers=1 2024-11-16T05:48:26,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:26,316 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:26,316 INFO [RS:1;3456ee6a3164:41701 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:48:26,316 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41701-0x1004712aa1b0002, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:26,316 INFO [RS:1;3456ee6a3164:41701 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,41701,1731736064423; zookeeper connection closed. 2024-11-16T05:48:26,317 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@684bb9cf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@684bb9cf 2024-11-16T05:48:26,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:26,625 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T05:48:26,632 DEBUG [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs 2024-11-16T05:48:26,632 INFO [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C46863%2C1731736063466.meta:.meta(num 1731736101423) 2024-11-16T05:48:26,633 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,633 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,633 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,633 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,634 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741890_1074 (size=14682) 2024-11-16T05:48:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741890_1074 (size=14682) 2024-11-16T05:48:26,642 DEBUG [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs 2024-11-16T05:48:26,642 INFO [RS:0;3456ee6a3164:46863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C46863%2C1731736063466:(num 1731736100954) 2024-11-16T05:48:26,642 DEBUG [RS:0;3456ee6a3164:46863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:26,642 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:48:26,643 INFO [RS:0;3456ee6a3164:46863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:48:26,643 INFO [RS:0;3456ee6a3164:46863 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T05:48:26,643 INFO [RS:0;3456ee6a3164:46863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:48:26,643 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller 
exiting. 2024-11-16T05:48:26,643 INFO [RS:0;3456ee6a3164:46863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46863 2024-11-16T05:48:26,646 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,46863,1731736063466 2024-11-16T05:48:26,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:48:26,646 INFO [RS:0;3456ee6a3164:46863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:48:26,647 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,46863,1731736063466] 2024-11-16T05:48:26,648 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,46863,1731736063466 already deleted, retry=false 2024-11-16T05:48:26,648 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,46863,1731736063466 expired; onlineServers=0 2024-11-16T05:48:26,649 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,44501,1731736063421' ***** 2024-11-16T05:48:26,649 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:48:26,649 INFO [M:0;3456ee6a3164:44501 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:48:26,649 INFO [M:0;3456ee6a3164:44501 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:48:26,649 DEBUG [M:0;3456ee6a3164:44501 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:48:26,649 DEBUG [M:0;3456ee6a3164:44501 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:48:26,649 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
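Both AbstractFSWAL(2118) errors above ("We have waited 5 seconds but the close of async writer doesn't complete...") name the configuration key that controls how long WAL shutdown waits for the writer to close. A minimal sketch of setting it programmatically follows; the 30-second value is an arbitrary example (the same key can equally be set in hbase-site.xml), and 5 seconds is simply what this run reports having waited.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// The shutdown ERROR above names this key explicitly; the value below is an
// arbitrary example for illustration.
public class WalShutdownWaitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    // Falls back to 5 here only because that is what this run waited.
    System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
  }
}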
2024-11-16T05:48:26,649 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736063664 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736063664,5,FailOnTimeoutGroup] 2024-11-16T05:48:26,649 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736063664 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736063664,5,FailOnTimeoutGroup] 2024-11-16T05:48:26,650 INFO [M:0;3456ee6a3164:44501 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:48:26,650 INFO [M:0;3456ee6a3164:44501 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:48:26,650 DEBUG [M:0;3456ee6a3164:44501 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:48:26,650 INFO [M:0;3456ee6a3164:44501 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:48:26,650 INFO [M:0;3456ee6a3164:44501 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:48:26,650 INFO [M:0;3456ee6a3164:44501 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:48:26,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:48:26,651 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:48:26,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:26,651 DEBUG [M:0;3456ee6a3164:44501 {}] zookeeper.ZKUtil(347): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T05:48:26,651 WARN [M:0;3456ee6a3164:44501 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T05:48:26,652 INFO [M:0;3456ee6a3164:44501 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/.lastflushedseqids 2024-11-16T05:48:26,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741898_1084 (size=130) 2024-11-16T05:48:26,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741898_1084 (size=130) 2024-11-16T05:48:26,658 INFO [M:0;3456ee6a3164:44501 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:48:26,658 INFO [M:0;3456ee6a3164:44501 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:48:26,658 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:48:26,658 INFO [M:0;3456ee6a3164:44501 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:26,658 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:26,658 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:48:26,658 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:26,659 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-16T05:48:26,674 DEBUG [M:0;3456ee6a3164:44501 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0083cc699f8840c9a32ef42c5709ca62 is 82, key is hbase:meta,,1/info:regioninfo/1731736064348/Put/seqid=0 2024-11-16T05:48:26,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741899_1085 (size=5672) 2024-11-16T05:48:26,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741899_1085 (size=5672) 2024-11-16T05:48:26,680 INFO [M:0;3456ee6a3164:44501 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0083cc699f8840c9a32ef42c5709ca62 2024-11-16T05:48:26,700 DEBUG [M:0;3456ee6a3164:44501 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3cede50fa8694d44818ed942ff0d9395 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731736064917/Put/seqid=0 2024-11-16T05:48:26,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741900_1086 (size=6255) 2024-11-16T05:48:26,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741900_1086 (size=6255) 2024-11-16T05:48:26,705 INFO [M:0;3456ee6a3164:44501 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3cede50fa8694d44818ed942ff0d9395 2024-11-16T05:48:26,710 INFO [M:0;3456ee6a3164:44501 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3cede50fa8694d44818ed942ff0d9395 2024-11-16T05:48:26,724 DEBUG [M:0;3456ee6a3164:44501 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce3c528dda3c4017869acec4d2fef280 is 69, key is 3456ee6a3164,41701,1731736064423/rs:state/1731736064472/Put/seqid=0 2024-11-16T05:48:26,729 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741901_1087 (size=5224) 2024-11-16T05:48:26,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741901_1087 (size=5224) 2024-11-16T05:48:26,730 INFO [M:0;3456ee6a3164:44501 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce3c528dda3c4017869acec4d2fef280 2024-11-16T05:48:26,747 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:26,747 INFO [RS:0;3456ee6a3164:46863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:48:26,747 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1004712aa1b0001, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:26,747 INFO [RS:0;3456ee6a3164:46863 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,46863,1731736063466; zookeeper connection closed. 2024-11-16T05:48:26,748 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@790080b0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@790080b0 2024-11-16T05:48:26,748 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-16T05:48:26,751 DEBUG [M:0;3456ee6a3164:44501 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/abd92949cff54565bc5e0e141f51a2fb is 52, key is load_balancer_on/state:d/1731736064408/Put/seqid=0 2024-11-16T05:48:26,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741902_1088 (size=5056) 2024-11-16T05:48:26,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741902_1088 (size=5056) 2024-11-16T05:48:26,756 INFO [M:0;3456ee6a3164:44501 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/abd92949cff54565bc5e0e141f51a2fb 2024-11-16T05:48:26,762 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0083cc699f8840c9a32ef42c5709ca62 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0083cc699f8840c9a32ef42c5709ca62 2024-11-16T05:48:26,769 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0083cc699f8840c9a32ef42c5709ca62, entries=8, sequenceid=60, filesize=5.5 K 2024-11-16T05:48:26,770 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3cede50fa8694d44818ed942ff0d9395 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3cede50fa8694d44818ed942ff0d9395 2024-11-16T05:48:26,775 INFO [M:0;3456ee6a3164:44501 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3cede50fa8694d44818ed942ff0d9395 2024-11-16T05:48:26,775 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3cede50fa8694d44818ed942ff0d9395, entries=6, sequenceid=60, filesize=6.1 K 2024-11-16T05:48:26,777 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce3c528dda3c4017869acec4d2fef280 as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ce3c528dda3c4017869acec4d2fef280 2024-11-16T05:48:26,782 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ce3c528dda3c4017869acec4d2fef280, entries=2, sequenceid=60, filesize=5.1 K 2024-11-16T05:48:26,783 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/abd92949cff54565bc5e0e141f51a2fb as hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/abd92949cff54565bc5e0e141f51a2fb 2024-11-16T05:48:26,788 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/abd92949cff54565bc5e0e141f51a2fb, entries=1, sequenceid=60, filesize=4.9 K 2024-11-16T05:48:26,789 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=60, compaction requested=false 2024-11-16T05:48:26,791 INFO [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T05:48:26,791 DEBUG [M:0;3456ee6a3164:44501 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736106658Disabling compacts and flushes for region at 1731736106658Disabling writes for close at 1731736106658Obtaining lock to block concurrent updates at 1731736106659 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736106659Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731736106659Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731736106660 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736106660Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736106673 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736106673Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736106685 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736106699 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736106699Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736106711 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736106724 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736106724Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736106735 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736106750 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736106750Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c6957f: reopening flushed file at 1731736106761 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26b6a5e3: reopening flushed file at 1731736106769 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@509c45f4: reopening flushed file at 1731736106776 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bdf9916: reopening flushed file at 1731736106782 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=60, compaction requested=false at 1731736106789 (+7 ms)Writing region close event to WAL at 1731736106791 (+2 ms)Closed at 1731736106791 2024-11-16T05:48:26,791 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,791 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,792 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,792 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,792 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:26,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40791 is added to blk_1073741889_1072 (size=1045) 2024-11-16T05:48:26,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741889_1072 (size=1045) 2024-11-16T05:48:27,002 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:48:27,022 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,022 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:27,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:27,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:27,535 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@74988e58 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-721148889-172.17.0.2-1731736062832:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40457,null,null]) java.net.ConnectException: Call From 3456ee6a3164/172.17.0.2 to localhost:35993 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T05:48:27,696 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/WALs/3456ee6a3164,44501,1731736063421/3456ee6a3164%2C44501%2C1731736063421.1731736063577 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/oldWALs/3456ee6a3164%2C44501%2C1731736063421.1731736063577 2024-11-16T05:48:27,704 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/MasterData/oldWALs/3456ee6a3164%2C44501%2C1731736063421.1731736063577 to hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/oldWALs/3456ee6a3164%2C44501%2C1731736063421.1731736063577$masterlocalwal$ 2024-11-16T05:48:27,704 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T05:48:27,704 INFO [M:0;3456ee6a3164:44501 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T05:48:27,704 INFO [M:0;3456ee6a3164:44501 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44501 2024-11-16T05:48:27,704 INFO [M:0;3456ee6a3164:44501 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:48:27,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:27,807 INFO [M:0;3456ee6a3164:44501 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:48:27,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44501-0x1004712aa1b0000, quorum=127.0.0.1:50879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:27,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7085983e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:27,814 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5dbb3308{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:27,814 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:27,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f9c830c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:27,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6287ee78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:27,818 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:48:27,818 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:48:27,818 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721148889-172.17.0.2-1731736062832 (Datanode Uuid aadbfb3b-707a-4c20-b5d9-9812720a1509) service to localhost/127.0.0.1:36821 2024-11-16T05:48:27,818 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:48:27,817 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2298d67a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40457,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35993 , LocalHost:localPort 3456ee6a3164/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T05:48:27,819 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2298d67a {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-721148889-172.17.0.2-1731736062832:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40791,null,null], DatanodeInfoWithStorage[127.0.0.1:40457,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-721148889-172.17.0.2-1731736062832 2024-11-16T05:48:27,819 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data3/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:27,820 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data4/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:27,820 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:48:27,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ab2015c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:27,823 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40a289c6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:27,823 INFO [Time-limited test {}] 
session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:27,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@187c1982{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:27,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24345a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:27,824 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:48:27,824 WARN [BP-721148889-172.17.0.2-1731736062832 heartbeating to localhost/127.0.0.1:36821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721148889-172.17.0.2-1731736062832 (Datanode Uuid 6c6b2bbe-587c-4d27-820d-03bc9d6cf66c) service to localhost/127.0.0.1:36821 2024-11-16T05:48:27,824 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T05:48:27,825 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:48:27,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data5/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:27,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/cluster_c2d9c991-3f31-0193-a817-a262b593875d/data/data6/current/BP-721148889-172.17.0.2-1731736062832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:27,825 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:48:27,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b2c714b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:48:27,830 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78100011{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:27,830 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:27,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@302502f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:27,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4799fc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:27,839 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:48:27,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:48:27,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:36821 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fb8f4bf4b78.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46059 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:36821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:36821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46059 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:36821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:36821 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fb8f4bf4b78.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=86 (was 145), ProcessCount=11 (was 11), AvailableMemoryMB=3235 (was 3805) 2024-11-16T05:48:27,882 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=86, ProcessCount=11, AvailableMemoryMB=3234 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.log.dir so I do NOT create it in target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/afff9bc5-11a3-1e09-2709-65da514aae63/hadoop.tmp.dir so I do NOT create it in target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf, deleteOnExit=true 2024-11-16T05:48:27,883 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/test.cache.data in system properties and HBase conf 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:48:27,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:48:27,884 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:48:27,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:48:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:48:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:48:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:48:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:48:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:48:27,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:48:27,896 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:48:27,946 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:27,950 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:27,953 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:27,953 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:27,953 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:48:27,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:27,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4778755b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:27,955 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49217387{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:28,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24f1fcf5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-35969-hadoop-hdfs-3_4_1-tests_jar-_-any-8496759923359953267/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:48:28,048 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35d68916{HTTP/1.1, (http/1.1)}{localhost:35969} 2024-11-16T05:48:28,048 INFO [Time-limited test {}] server.Server(415): Started @149746ms 2024-11-16T05:48:28,059 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:48:28,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:28,119 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:28,120 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:28,120 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:28,120 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:48:28,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60326694{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:28,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@302c0dd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:28,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@759e22d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-45141-hadoop-hdfs-3_4_1-tests_jar-_-any-13469462191955218057/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:28,214 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f6bebc0{HTTP/1.1, (http/1.1)}{localhost:45141} 2024-11-16T05:48:28,214 INFO [Time-limited test {}] server.Server(415): Started @149913ms 2024-11-16T05:48:28,215 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:48:28,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:28,245 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:28,248 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:28,249 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:28,249 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:28,250 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:48:28,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19254d5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:28,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67c5595d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:28,281 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data1/current/BP-82948210-172.17.0.2-1731736107907/current, will proceed with Du for space computation calculation, 2024-11-16T05:48:28,281 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data2/current/BP-82948210-172.17.0.2-1731736107907/current, will proceed with Du for space computation calculation, 2024-11-16T05:48:28,302 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:48:28,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9abb4a20ef8417 with lease ID 0xf69a32e29d2883d2: Processing first storage report for DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3 from datanode DatanodeRegistration(127.0.0.1:37901, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=33925, infoSecurePort=0, ipcPort=35243, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907) 2024-11-16T05:48:28,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9abb4a20ef8417 with lease ID 0xf69a32e29d2883d2: from storage DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3 node DatanodeRegistration(127.0.0.1:37901, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=33925, infoSecurePort=0, ipcPort=35243, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:28,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9abb4a20ef8417 with lease ID 0xf69a32e29d2883d2: Processing first storage report for DS-4f04841f-9be5-4dbe-b91b-a852dfdbff33 from datanode DatanodeRegistration(127.0.0.1:37901, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=33925, infoSecurePort=0, ipcPort=35243, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907) 2024-11-16T05:48:28,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9abb4a20ef8417 with lease ID 0xf69a32e29d2883d2: from storage DS-4f04841f-9be5-4dbe-b91b-a852dfdbff33 node DatanodeRegistration(127.0.0.1:37901, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=33925, infoSecurePort=0, ipcPort=35243, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:28,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b537c13{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-35291-hadoop-hdfs-3_4_1-tests_jar-_-any-17616123702841402143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:28,349 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a93f88d{HTTP/1.1, (http/1.1)}{localhost:35291} 2024-11-16T05:48:28,349 INFO [Time-limited test {}] server.Server(415): Started @150047ms 2024-11-16T05:48:28,350 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
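The entries above show the test bringing up the HDFS side of the minicluster: two datanodes (the data1 through data4 block-pool slices), a Jetty web UI per daemon, and the first block reports reaching the namenode. Below is a minimal sketch of the same setup using Hadoop's own test harness, assuming the hadoop-hdfs tests artifact (the hadoop-hdfs-3.4.1-tests.jar referenced in the log) is on the classpath; the class name and base directory are illustrative, not the paths used by this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative base directory; the run above uses a per-test target/test-data path.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    // Two datanodes, matching numDataNodes=2 in the StartMiniClusterOption logged earlier.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .build();
    cluster.waitActive();

    FileSystem fs = cluster.getFileSystem();
    System.out.println("NameNode at " + fs.getUri()); // e.g. hdfs://localhost:<random port>
    fs.mkdirs(new Path("/user/jenkins/test-data"));

    cluster.shutdown();
  }
}

This is broadly what HBaseTestingUtil does on the test's behalf before layering the HBase master and region server on top of the freshly started DFS.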
2024-11-16T05:48:28,415 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data3/current/BP-82948210-172.17.0.2-1731736107907/current, will proceed with Du for space computation calculation, 2024-11-16T05:48:28,415 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data4/current/BP-82948210-172.17.0.2-1731736107907/current, will proceed with Du for space computation calculation, 2024-11-16T05:48:28,431 WARN [Thread-1203 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:48:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3129560d4f5067be with lease ID 0xf69a32e29d2883d3: Processing first storage report for DS-d42973f3-5d06-454e-9173-095fed398d89 from datanode DatanodeRegistration(127.0.0.1:36971, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=39233, infoSecurePort=0, ipcPort=39555, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907) 2024-11-16T05:48:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3129560d4f5067be with lease ID 0xf69a32e29d2883d3: from storage DS-d42973f3-5d06-454e-9173-095fed398d89 node DatanodeRegistration(127.0.0.1:36971, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=39233, infoSecurePort=0, ipcPort=39555, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T05:48:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3129560d4f5067be with lease ID 0xf69a32e29d2883d3: Processing first storage report for DS-bd2b6539-0c38-4c5a-87bf-d616e2c2afec from datanode DatanodeRegistration(127.0.0.1:36971, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=39233, infoSecurePort=0, ipcPort=39555, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907) 2024-11-16T05:48:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3129560d4f5067be with lease ID 0xf69a32e29d2883d3: from storage DS-bd2b6539-0c38-4c5a-87bf-d616e2c2afec node DatanodeRegistration(127.0.0.1:36971, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=39233, infoSecurePort=0, ipcPort=39555, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:28,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:28,476 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0 2024-11-16T05:48:28,481 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/zookeeper_0, clientPort=61239, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:48:28,483 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61239 2024-11-16T05:48:28,484 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,494 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:48:28,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:48:28,495 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6 with version=8 2024-11-16T05:48:28,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:48:28,497 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:48:28,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:48:28,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:48:28,497 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:48:28,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:48:28,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:48:28,497 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:48:28,498 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:48:28,498 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34493 2024-11-16T05:48:28,500 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34493 connecting to ZooKeeper ensemble=127.0.0.1:61239 2024-11-16T05:48:28,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344930x0, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:48:28,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34493-0x10047135a2e0000 connected 2024-11-16T05:48:28,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,524 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34493-0x10047135a2e0000, 
quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:28,524 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6, hbase.cluster.distributed=false 2024-11-16T05:48:28,526 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:48:28,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34493 2024-11-16T05:48:28,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34493 2024-11-16T05:48:28,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34493 2024-11-16T05:48:28,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34493 2024-11-16T05:48:28,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34493 2024-11-16T05:48:28,541 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:48:28,542 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:48:28,543 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32939 2024-11-16T05:48:28,544 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32939 connecting to ZooKeeper ensemble=127.0.0.1:61239 2024-11-16T05:48:28,545 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,546 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329390x0, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:48:28,550 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32939-0x10047135a2e0001 connected 2024-11-16T05:48:28,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:28,550 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:48:28,552 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:48:28,552 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:48:28,553 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:48:28,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32939 2024-11-16T05:48:28,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32939 2024-11-16T05:48:28,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32939 2024-11-16T05:48:28,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32939 2024-11-16T05:48:28,559 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32939 2024-11-16T05:48:28,569 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:34493 2024-11-16T05:48:28,570 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:48:28,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:48:28,572 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, 
quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:48:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,573 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:48:28,573 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,34493,1731736108497 from backup master directory 2024-11-16T05:48:28,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:48:28,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:48:28,574 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
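The watcher traffic above is the active-master election: the process registers an ephemeral znode under /hbase/backup-masters, claims /hbase/master, and on success deletes its backup entry. The sketch below reproduces that pattern with the plain Apache ZooKeeper client; the quorum address and znode paths are taken from the log, but everything else (class name, error handling, the assumption that the parent znodes already exist and the session is connected) is illustrative rather than HBase's ActiveMasterManager code.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and paths mirror the log; parents (/hbase, /hbase/backup-masters) are assumed to exist.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61239", 30000, event -> {});
    String me = "3456ee6a3164,34493,1731736108497";

    // Register under /hbase/backup-masters first, as the ActiveMasterManager entries show.
    zk.create("/hbase/backup-masters/" + me, new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    try {
      // Race to claim the active-master znode; only one contender can create it.
      zk.create("/hbase/master", me.getBytes(),
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Won the election: drop our backup-masters entry, as logged above.
      zk.delete("/hbase/backup-masters/" + me, -1);
      System.out.println("Registered as active master=" + me);
    } catch (KeeperException.NodeExistsException e) {
      // Lost: stay a backup master and watch /hbase/master for deletion before retrying.
      zk.exists("/hbase/master", event -> System.out.println("master znode changed: " + event));
    }
  }
}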
2024-11-16T05:48:28,574 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,579 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/hbase.id] with ID: cac62492-4529-4a10-b4ec-9e3854392d21 2024-11-16T05:48:28,579 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/.tmp/hbase.id 2024-11-16T05:48:28,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:48:28,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:48:28,587 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/.tmp/hbase.id]:[hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/hbase.id] 2024-11-16T05:48:28,599 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:28,599 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T05:48:28,600 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
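The two FSUtils entries above describe the usual write-to-temp-then-rename pattern for the cluster ID file: the UUID is written to .tmp/hbase.id first and then moved over hbase.id so readers never observe a half-written file. A minimal sketch of that pattern with the public Hadoop FileSystem API follows; the helper name and error handling are assumptions, and only the paths and the UUID echo the log.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  /** Writes content to <dir>/.tmp/<name> and renames it into place. */
  static void writeAtomically(FileSystem fs, Path dir, String name, String content)
      throws IOException {
    Path tmp = new Path(new Path(dir, ".tmp"), name);
    Path dst = new Path(dir, name);
    try (FSDataOutputStream out = fs.create(tmp, true)) {   // overwrite any stale temp file
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                             // rename is atomic on HDFS
      throw new IOException("Could not move " + tmp + " to " + dst);
    }
  }

  public static void main(String[] args) throws Exception {
    // Root dir mirrors the hdfs://localhost:41893/... layout in the log; any writable FS works.
    FileSystem fs = FileSystem.get(new Configuration());
    writeAtomically(fs,
        new Path("/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6"),
        "hbase.id", "cac62492-4529-4a10-b4ec-9e3854392d21");
  }
}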
2024-11-16T05:48:28,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:48:28,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:48:28,609 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:48:28,609 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:48:28,610 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:48:28,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:48:28,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:48:28,618 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store 2024-11-16T05:48:28,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:48:28,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:48:28,625 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:48:28,625 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:48:28,625 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:28,625 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:28,625 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:48:28,625 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:28,625 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
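Several ZKWatcher entries above and further down record ZooKeeper watch notifications such as NodeChildrenChanged and NodeCreated on paths under /hbase. The sketch below shows, with the plain ZooKeeper client API, how such one-shot watch events are registered and delivered; the quorum address comes from the log, while the watcher class itself is illustrative and is not HBase's ZKWatcher.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkEventSketch {
    public static void main(String[] args) throws Exception {
        // Print events in roughly the same shape as the ZKWatcher log lines above.
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:61239", 30000, watcher);

        // Setting a child watch on /hbase means the next create/delete of a child
        // znode triggers a single NodeChildrenChanged notification to the watcher.
        zk.getChildren("/hbase", true);

        Thread.sleep(5000); // keep the session alive long enough to observe events
        zk.close();
    }
}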
2024-11-16T05:48:28,625 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736108625Disabling compacts and flushes for region at 1731736108625Disabling writes for close at 1731736108625Writing region close event to WAL at 1731736108625Closed at 1731736108625 2024-11-16T05:48:28,626 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/.initializing 2024-11-16T05:48:28,626 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,629 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C34493%2C1731736108497, suffix=, logDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497, archiveDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/oldWALs, maxLogs=10 2024-11-16T05:48:28,629 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C34493%2C1731736108497.1731736108629 2024-11-16T05:48:28,634 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 2024-11-16T05:48:28,638 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39233:39233),(127.0.0.1/127.0.0.1:33925:33925)] 2024-11-16T05:48:28,639 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:48:28,639 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:48:28,639 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,639 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:48:28,642 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:28,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:48:28,643 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:48:28,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:48:28,645 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:48:28,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:48:28,647 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,647 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:48:28,647 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,648 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,648 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,650 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,650 DEBUG [master/3456ee6a3164:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,651 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T05:48:28,652 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:48:28,654 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:48:28,655 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811116, jitterRate=0.03138798475265503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:48:28,655 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731736108639Initializing all the Stores at 1731736108640 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736108640Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736108640Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736108640Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736108640Cleaning up temporary data from old regions at 1731736108650 (+10 ms)Region opened successfully at 1731736108655 (+5 ms) 2024-11-16T05:48:28,655 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:48:28,659 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@158a6dfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:48:28,660 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T05:48:28,660 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:48:28,660 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:48:28,660 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:48:28,661 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T05:48:28,661 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T05:48:28,661 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:48:28,663 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:48:28,664 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:48:28,666 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:48:28,667 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:48:28,668 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:48:28,668 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:48:28,669 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:48:28,670 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:48:28,671 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:48:28,673 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:48:28,674 DEBUG 
[master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:48:28,676 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:48:28,676 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:48:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,678 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,34493,1731736108497, sessionid=0x10047135a2e0000, setting cluster-up flag (Was=false) 2024-11-16T05:48:28,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,683 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:48:28,684 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:28,689 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T05:48:28,690 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,34493,1731736108497 2024-11-16T05:48:28,691 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:48:28,693 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:48:28,693 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:48:28,693 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T05:48:28,694 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,34493,1731736108497 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:48:28,695 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:48:28,695 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:48:28,695 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:48:28,695 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:48:28,695 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:48:28,696 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,696 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:48:28,696 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:48:28,699 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:48:28,699 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:48:28,700 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,700 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731736138708 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:48:28,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T05:48:28,710 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,710 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:48:28,710 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:48:28,710 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:48:28,711 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:48:28,711 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:48:28,711 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736108711,5,FailOnTimeoutGroup] 2024-11-16T05:48:28,712 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736108711,5,FailOnTimeoutGroup] 2024-11-16T05:48:28,712 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,712 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:48:28,712 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,712 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
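The ChoreService entries above enable periodic tasks (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). The fragment below is only a stand-in showing the general shape of such fixed-period chores with java.util.concurrent; it is not HBase's ScheduledChore/ChoreService implementation, and the task bodies are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        // Period taken from the LogsCleaner entry above: 600000 ms.
        Runnable logsCleaner = () -> System.out.println("LogsCleaner: scanning old WALs for expired files");
        pool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

        // A second chore with its own period, like SnapshotCleaner at 1800000 ms.
        Runnable snapshotCleaner = () -> System.out.println("SnapshotCleaner: removing expired snapshots");
        pool.scheduleAtFixedRate(snapshotCleaner, 0, 1_800_000, TimeUnit.MILLISECONDS);
    }
}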
2024-11-16T05:48:28,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:48:28,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:48:28,718 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:48:28,718 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6 2024-11-16T05:48:28,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:48:28,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:48:28,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:48:28,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:48:28,730 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:48:28,730 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:28,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:48:28,732 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:48:28,732 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:28,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:48:28,734 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:48:28,734 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:28,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:48:28,736 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:48:28,736 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:28,737 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:28,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:48:28,738 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740 2024-11-16T05:48:28,738 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740 2024-11-16T05:48:28,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:48:28,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:48:28,740 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
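The two FlushLargeStoresPolicy entries in this log (32.0 M for master:store earlier, 16.0 M for hbase:meta here) are simply the region's memstore flush size divided by its number of column families, since hbase.hregion.percolumnfamilyflush.size.lower.bound is not set. The check below reproduces that arithmetic from values printed in the log; the 64 MB flush size for hbase:meta is inferred from the 16 MB result and its four families (info, ns, rep_barrier, table), not read directly from any log line.

public class FlushLowerBoundCheck {
    public static void main(String[] args) {
        // master:store: Injected flushSize=134217728 (logged above); families: info, proc, rs, state.
        long masterStoreFlushSize = 134_217_728L;
        System.out.println(masterStoreFlushSize / 4); // 33554432 -> "32.0 M", matching flushSizeLowerBound=33554432

        // hbase:meta: 16.0 M per family across info, ns, rep_barrier, table implies a 64 MB flush size.
        long metaFlushSize = 67_108_864L; // inferred, see note above
        System.out.println(metaFlushSize / 4); // 16777216 -> "16.0 M", matching flushSizeLowerBound=16777216
    }
}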
2024-11-16T05:48:28,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:48:28,743 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:48:28,744 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810655, jitterRate=0.030801430344581604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:48:28,744 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731736108727Initializing all the Stores at 1731736108728 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736108728Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736108728Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736108728Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736108728Cleaning up temporary data from old regions at 1731736108740 (+12 ms)Region opened successfully at 1731736108744 (+4 ms) 2024-11-16T05:48:28,744 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:48:28,744 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:48:28,744 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:48:28,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:48:28,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:48:28,745 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:48:28,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736108744Disabling compacts and flushes for region at 1731736108744Disabling writes for close at 1731736108745 (+1 ms)Writing 
region close event to WAL at 1731736108745Closed at 1731736108745 2024-11-16T05:48:28,746 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:48:28,746 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T05:48:28,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:48:28,748 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:48:28,749 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:48:28,761 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(746): ClusterId : cac62492-4529-4a10-b4ec-9e3854392d21 2024-11-16T05:48:28,761 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:48:28,763 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:48:28,763 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:48:28,765 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T05:48:28,765 DEBUG [RS:0;3456ee6a3164:32939 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@447e4cea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:48:28,775 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:32939 2024-11-16T05:48:28,775 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:48:28,775 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:48:28,776 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(832): About to register with Master. 
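The two ConstantSizeRegionSplitPolicy entries earlier in this log print desiredMaxFileSize=811116 with jitterRate=0.03138798475265503 and desiredMaxFileSize=810655 with jitterRate=0.030801430344581604. Both pairs are consistent with desiredMaxFileSize = maxFileSize * (1 + jitterRate) for a configured max file size of 786432 bytes; that base value and the formula are inferences from the two printed pairs, not values stated directly anywhere in the log.

public class SplitJitterCheck {
    public static void main(String[] args) {
        long maxFileSize = 786_432L; // inferred base value, see note above

        System.out.println((long) (maxFileSize * (1 + 0.03138798475265503)));  // 811116
        System.out.println((long) (maxFileSize * (1 + 0.030801430344581604))); // 810655
    }
}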
2024-11-16T05:48:28,776 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,34493,1731736108497 with port=32939, startcode=1731736108541 2024-11-16T05:48:28,776 DEBUG [RS:0;3456ee6a3164:32939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:48:28,778 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48587, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:48:28,779 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34493 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,32939,1731736108541 2024-11-16T05:48:28,779 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34493 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,32939,1731736108541 2024-11-16T05:48:28,780 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6 2024-11-16T05:48:28,780 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41893 2024-11-16T05:48:28,780 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:48:28,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:48:28,783 DEBUG [RS:0;3456ee6a3164:32939 {}] zookeeper.ZKUtil(111): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,32939,1731736108541 2024-11-16T05:48:28,783 WARN [RS:0;3456ee6a3164:32939 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T05:48:28,783 INFO [RS:0;3456ee6a3164:32939 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:48:28,783 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541 2024-11-16T05:48:28,783 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,32939,1731736108541] 2024-11-16T05:48:28,786 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:48:28,788 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:48:28,788 INFO [RS:0;3456ee6a3164:32939 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:48:28,788 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
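The RegionServerTracker entry above reports that the region server's ephemeral znode under /hbase/rs was created. Below is a minimal sketch of creating such an ephemeral node with the plain ZooKeeper client API; the znode path and quorum address are taken from the log, while the surrounding class and the empty data payload are illustrative and this is not HBase's actual registration code.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61239", 30000, event -> { });

        // An ephemeral znode is deleted automatically when this session expires,
        // which is how the master notices a region server that has gone away.
        // Assumes the parent /hbase/rs znode already exists.
        String path = zk.create(
            "/hbase/rs/3456ee6a3164,32939,1731736108541",
            new byte[0],                    // payload is illustrative only
            ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.EPHEMERAL);

        System.out.println("registered at " + path);
        Thread.sleep(5000); // keep the session (and the znode) alive briefly
        zk.close();
    }
}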
2024-11-16T05:48:28,788 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:48:28,789 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:48:28,789 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:48:28,790 DEBUG [RS:0;3456ee6a3164:32939 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:48:28,791 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T05:48:28,791 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,791 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,791 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,791 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,791 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,32939,1731736108541-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:48:28,806 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:48:28,806 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,32939,1731736108541-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,806 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,806 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.Replication(171): 3456ee6a3164,32939,1731736108541 started 2024-11-16T05:48:28,819 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:28,819 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,32939,1731736108541, RpcServer on 3456ee6a3164/172.17.0.2:32939, sessionid=0x10047135a2e0001 2024-11-16T05:48:28,819 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:48:28,819 DEBUG [RS:0;3456ee6a3164:32939 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,32939,1731736108541 2024-11-16T05:48:28,819 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,32939,1731736108541' 2024-11-16T05:48:28,819 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:48:28,820 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:48:28,820 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:48:28,820 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:48:28,820 DEBUG [RS:0;3456ee6a3164:32939 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,32939,1731736108541 2024-11-16T05:48:28,820 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,32939,1731736108541' 2024-11-16T05:48:28,821 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:48:28,821 DEBUG 
[RS:0;3456ee6a3164:32939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:48:28,821 DEBUG [RS:0;3456ee6a3164:32939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:48:28,821 INFO [RS:0;3456ee6a3164:32939 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:48:28,821 INFO [RS:0;3456ee6a3164:32939 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T05:48:28,899 WARN [3456ee6a3164:34493 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T05:48:28,923 INFO [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C32939%2C1731736108541, suffix=, logDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541, archiveDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs, maxLogs=32 2024-11-16T05:48:28,924 INFO [RS:0;3456ee6a3164:32939 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.1731736108924 2024-11-16T05:48:28,931 INFO [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 2024-11-16T05:48:28,936 DEBUG [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33925:33925),(127.0.0.1/127.0.0.1:39233:39233)] 2024-11-16T05:48:29,149 DEBUG [3456ee6a3164:34493 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:48:29,151 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,32939,1731736108541 2024-11-16T05:48:29,154 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,32939,1731736108541, state=OPENING 2024-11-16T05:48:29,156 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:48:29,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:29,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:29,160 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:48:29,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,32939,1731736108541}] 2024-11-16T05:48:29,160 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-16T05:48:29,160 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:48:29,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:48:29,316 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:48:29,320 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40781, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:48:29,326 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:48:29,326 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:48:29,329 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C32939%2C1731736108541.meta, suffix=.meta, logDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541, archiveDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs, maxLogs=32 2024-11-16T05:48:29,330 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta 2024-11-16T05:48:29,337 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta 2024-11-16T05:48:29,339 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39233:39233),(127.0.0.1/127.0.0.1:33925:33925)] 2024-11-16T05:48:29,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:48:29,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:48:29,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:48:29,340 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
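Both WALs above (the region server WAL and the hbase:meta WAL) are configured with blocksize=256 MB, rollsize=128 MB and maxLogs=32, so the roll size works out to half the WAL block size. The sketch below only reproduces that arithmetic; the two property names are the commonly documented ones and should be treated as an assumption here, since this log does not show them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSize {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key names are assumptions for illustration; the logged values are
    // blocksize=256 MB and rollsize=128 MB, i.e. a 0.5 multiplier.
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    long rollSize = (long) (blockSize * multiplier);
    System.out.println("blockSize=" + blockSize + " rollSize=" + rollSize); // 268435456, 134217728
  }
}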
2024-11-16T05:48:29,340 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:48:29,341 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:48:29,341 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:48:29,341 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:48:29,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:48:29,344 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:48:29,344 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:29,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:29,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:48:29,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:48:29,346 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:29,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:29,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:48:29,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:48:29,349 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:29,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:48:29,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:48:29,351 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:48:29,351 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:29,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
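The StoreOpener lines above bring up the four hbase:meta column families (info, ns, rep_barrier, table), each with a DefaultStoreFileTracker and ROW_INDEX_V1 encoding. A small client-side sketch, assuming a reachable cluster, that scans the info family of hbase:meta and prints the region rows those stores serve:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaInfo {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // 'info' is one of the four meta families opened above.
      Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
      try (ResultScanner rs = meta.getScanner(scan)) {
        for (Result r : rs) {
          System.out.println(Bytes.toStringBinary(r.getRow()));
        }
      }
    }
  }
}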
2024-11-16T05:48:29,352 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:48:29,353 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740 2024-11-16T05:48:29,354 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740 2024-11-16T05:48:29,355 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:48:29,355 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:48:29,356 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:48:29,357 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:48:29,358 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746705, jitterRate=-0.05051565170288086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:48:29,358 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:48:29,358 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731736109341Writing region info on filesystem at 1731736109341Initializing all the Stores at 1731736109342 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736109342Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736109343 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736109343Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736109343Cleaning up temporary data from old regions at 1731736109355 (+12 ms)Running coprocessor post-open hooks at 1731736109358 (+3 ms)Region opened successfully at 1731736109358 2024-11-16T05:48:29,359 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731736109315 2024-11-16T05:48:29,361 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:48:29,362 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:48:29,362 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,32939,1731736108541 2024-11-16T05:48:29,363 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,32939,1731736108541, state=OPEN 2024-11-16T05:48:29,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:48:29,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:48:29,368 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,32939,1731736108541 2024-11-16T05:48:29,368 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:48:29,368 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:48:29,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:48:29,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,32939,1731736108541 in 208 msec 2024-11-16T05:48:29,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:48:29,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 625 msec 2024-11-16T05:48:29,374 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:48:29,375 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:48:29,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:48:29,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,32939,1731736108541, seqNum=-1] 2024-11-16T05:48:29,376 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:48:29,377 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60347, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:48:29,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 690 msec 2024-11-16T05:48:29,383 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731736109383, completionTime=-1 2024-11-16T05:48:29,383 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:48:29,383 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736169385 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736229385 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,34493,1731736108497-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,34493,1731736108497-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:29,385 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,34493,1731736108497-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:29,386 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:34493, period=300000, unit=MILLISECONDS is enabled. 
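InitMetaProcedure above announces that it is going to create the {NAME => 'default'} and {NAME => 'hbase'} namespaces. A hedged client-side check, assuming a running cluster, that lists the namespaces the master ends up with:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespaces {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure the cluster should report the two built-in
      // namespaces created above: 'default' and 'hbase'.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}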
2024-11-16T05:48:29,386 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:29,386 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:29,387 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.815sec 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,34493,1731736108497-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:48:29,389 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,34493,1731736108497-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:48:29,392 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:48:29,392 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:48:29,392 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,34493,1731736108497-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:48:29,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:29,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4abbe1b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:48:29,463 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,34493,-1 for getting cluster id 2024-11-16T05:48:29,463 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:48:29,466 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cac62492-4529-4a10-b4ec-9e3854392d21' 2024-11-16T05:48:29,467 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:48:29,467 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cac62492-4529-4a10-b4ec-9e3854392d21" 2024-11-16T05:48:29,467 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48ac671a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:48:29,467 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,34493,-1] 2024-11-16T05:48:29,468 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 
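The recurring Close-WAL-Writer-0 WARNs come from RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed reflectively while trying to recover the lease on an old WAL; the InvocationTargetException only wraps the real failure, java.io.IOException: Filesystem closed, which means the DFS client behind that path has already been shut down. Below is a minimal sketch of that reflective probe and the unwrapping step; it is illustrative and not the utility's actual code.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Probe isFileClosed via reflection so the code also works with filesystems
  // that do not expose the method; when the underlying client is already
  // closed, the call fails exactly as logged above.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // filesystem has no isFileClosed(); treat as unknown
    } catch (InvocationTargetException e) {
      // This is the branch producing the WARN; the wrapped cause in the log
      // is java.io.IOException: Filesystem closed.
      System.err.println("isFileClosed failed: " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}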
2024-11-16T05:48:29,468 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:29,472 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:48:29,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8c8328b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:48:29,474 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:48:29,475 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,32939,1731736108541, seqNum=-1] 2024-11-16T05:48:29,476 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:48:29,478 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:48:29,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,34493,1731736108497 2024-11-16T05:48:29,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:48:29,484 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:48:29,484 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-16T05:48:29,484 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-16T05:48:29,484 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T05:48:29,486 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,34493,1731736108497 2024-11-16T05:48:29,486 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3fa1405b 2024-11-16T05:48:29,486 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T05:48:29,489 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T05:48:29,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T05:48:29,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
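The two TableDescriptorChecker warnings are triggered by the deliberately tiny test values MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192 carried in the descriptor of the table created next. A sketch, using the public client API rather than the test's own code, of a descriptor that would trip exactly those checks:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TinyTableDescriptor {
  public static void main(String[] args) {
    // Values copied from the log; both are far below what the checker
    // considers safe, hence the MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1).build())
        .setMaxFileSize(786432L)      // overrides hbase.hregion.max.filesize
        .setMemStoreFlushSize(8192L)  // overrides hbase.hregion.memstore.flush.size
        .build();
    System.out.println(td);
    // Passing this to Admin.createTable would produce a CreateTableProcedure
    // like pid=4 below.
  }
}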
2024-11-16T05:48:29,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:48:29,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T05:48:29,494 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T05:48:29,494 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:29,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-16T05:48:29,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:48:29,496 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T05:48:29,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741835_1011 (size=395) 2024-11-16T05:48:29,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741835_1011 (size=395) 2024-11-16T05:48:29,504 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 398a98ac1da521fd64df4d7a31ba5467, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6 2024-11-16T05:48:29,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36971 is added to blk_1073741836_1012 (size=78) 2024-11-16T05:48:29,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741836_1012 (size=78) 2024-11-16T05:48:29,513 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:48:29,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 398a98ac1da521fd64df4d7a31ba5467, disabling compactions & flushes 2024-11-16T05:48:29,513 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:29,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:29,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. after waiting 0 ms 2024-11-16T05:48:29,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:29,514 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:29,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 398a98ac1da521fd64df4d7a31ba5467: Waiting for close lock at 1731736109513Disabling compacts and flushes for region at 1731736109513Disabling writes for close at 1731736109513Writing region close event to WAL at 1731736109513Closed at 1731736109513 2024-11-16T05:48:29,515 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T05:48:29,515 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731736109515"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731736109515"}]},"ts":"1731736109515"} 2024-11-16T05:48:29,517 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
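The MetaTableAccessor Put above writes the new region's info:regioninfo and info:state cells into hbase:meta under the row 'TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.'. A sketch, assuming a reachable cluster, of reading that state cell back:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadRegionState {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes(
        "TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Get get = new Get(row).addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"));
      Result r = meta.get(get);
      // Expected to move OFFLINE -> OPENING -> OPEN as the assignment below runs.
      System.out.println(
          Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
    }
  }
}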
2024-11-16T05:48:29,519 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T05:48:29,519 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736109519"}]},"ts":"1731736109519"} 2024-11-16T05:48:29,521 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-16T05:48:29,521 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=398a98ac1da521fd64df4d7a31ba5467, ASSIGN}] 2024-11-16T05:48:29,523 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=398a98ac1da521fd64df4d7a31ba5467, ASSIGN 2024-11-16T05:48:29,524 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=398a98ac1da521fd64df4d7a31ba5467, ASSIGN; state=OFFLINE, location=3456ee6a3164,32939,1731736108541; forceNewPlan=false, retain=false 2024-11-16T05:48:29,675 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=398a98ac1da521fd64df4d7a31ba5467, regionState=OPENING, regionLocation=3456ee6a3164,32939,1731736108541 2024-11-16T05:48:29,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=398a98ac1da521fd64df4d7a31ba5467, ASSIGN because future has completed 2024-11-16T05:48:29,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 398a98ac1da521fd64df4d7a31ba5467, server=3456ee6a3164,32939,1731736108541}] 2024-11-16T05:48:29,847 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 
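At this point the master has queued OpenRegionProcedure pid=6 and the region server's AssignRegionHandler is opening the region. The same progress can be observed from the client side, in a hedged sketch assuming a running cluster, by polling Admin.isTableAvailable until the assignment finishes:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTable {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Poll until every region of the table is assigned and open.
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(100);
      }
      System.out.println(tn + " is available");
    }
  }
}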
2024-11-16T05:48:29,847 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 398a98ac1da521fd64df4d7a31ba5467, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:48:29,848 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,848 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:48:29,848 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,848 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,850 INFO [StoreOpener-398a98ac1da521fd64df4d7a31ba5467-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,852 INFO [StoreOpener-398a98ac1da521fd64df4d7a31ba5467-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 398a98ac1da521fd64df4d7a31ba5467 columnFamilyName info 2024-11-16T05:48:29,852 DEBUG [StoreOpener-398a98ac1da521fd64df4d7a31ba5467-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:48:29,853 INFO [StoreOpener-398a98ac1da521fd64df4d7a31ba5467-1 {}] regionserver.HStore(327): Store=398a98ac1da521fd64df4d7a31ba5467/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:48:29,853 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,855 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,856 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,857 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,857 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,859 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,862 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:48:29,862 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 398a98ac1da521fd64df4d7a31ba5467; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825564, jitterRate=0.04975906014442444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:48:29,862 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:29,863 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 398a98ac1da521fd64df4d7a31ba5467: Running coprocessor pre-open hook at 1731736109848Writing region info on filesystem at 1731736109848Initializing all the Stores at 1731736109850 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736109850Cleaning up temporary data from old regions at 1731736109857 (+7 ms)Running coprocessor post-open hooks at 1731736109862 (+5 ms)Region opened successfully at 1731736109863 (+1 ms) 2024-11-16T05:48:29,864 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467., pid=6, masterSystemTime=1731736109842 2024-11-16T05:48:29,867 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:29,867 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:29,868 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=398a98ac1da521fd64df4d7a31ba5467, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,32939,1731736108541 2024-11-16T05:48:29,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 398a98ac1da521fd64df4d7a31ba5467, server=3456ee6a3164,32939,1731736108541 because future has completed 2024-11-16T05:48:29,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T05:48:29,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 398a98ac1da521fd64df4d7a31ba5467, server=3456ee6a3164,32939,1731736108541 in 188 msec 2024-11-16T05:48:29,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T05:48:29,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=398a98ac1da521fd64df4d7a31ba5467, ASSIGN in 353 msec 2024-11-16T05:48:29,879 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T05:48:29,879 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736109879"}]},"ts":"1731736109879"} 2024-11-16T05:48:29,881 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-16T05:48:29,882 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T05:48:29,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 392 msec 2024-11-16T05:48:30,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:30,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:31,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:31,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:32,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:32,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:33,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:33,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:34,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:48:34,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T05:48:34,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T05:48:34,306 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T05:48:34,306 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-16T05:48:34,307 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:48:34,307 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T05:48:34,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:34,845 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:48:34,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,873 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:34,878 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T05:48:34,878 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-16T05:48:35,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:35,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:36,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:36,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:37,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:37,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:38,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:38,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:39,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:39,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:39,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34493 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:48:39,600 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-16T05:48:39,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-16T05:48:39,606 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T05:48:39,606 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:39,610 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467., hostname=3456ee6a3164,32939,1731736108541, seqNum=2] 2024-11-16T05:48:40,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:40,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:41,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:41,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:41,613 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 2024-11-16T05:48:41,614 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:41,615 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
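The repeated `util.RecoverLeaseFSUtils(258): Failed invocation` entries above all bottom out in `java.io.IOException: Filesystem closed`: the lease-recovery path is reflectively calling `DistributedFileSystem.isFileClosed` against a DFS client that has already been shut down. The sketch below is illustrative only and is not HBase's implementation; it shows the same kind of reflective probe against the public `isFileClosed(Path)` API, with the class name, retry count, and sleep interval chosen here as assumptions.

```java
// Illustrative sketch only: a reflection-based isFileClosed() probe with retry,
// in the spirit of the RecoverLeaseFSUtils frames above. Names and retry policy
// are assumptions, not HBase's actual code.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  /**
   * Returns true once the filesystem reports the file as closed, retrying a few
   * times. isFileClosed(Path) only exists on DistributedFileSystem, so it is
   * looked up reflectively and its absence is treated as "unknown" (false).
   */
  public static boolean waitUntilClosed(FileSystem fs, Path path, int attempts, long sleepMs)
      throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS filesystem; nothing to probe
    }
    for (int i = 0; i < attempts; i++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, path)) {
          return true;
        }
      } catch (InvocationTargetException e) {
        // e.getCause() is typically IOException("Filesystem closed") when the
        // DFSClient has already been shut down, as in the log above.
      } catch (IllegalAccessException e) {
        return false;
      }
      Thread.sleep(sleepMs);
    }
    return false;
  }
}
```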
2024-11-16T05:48:41,615 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:41,616 WARN [DataStreamer for file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta block BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK], DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]) is bad. 2024-11-16T05:48:41,615 WARN [DataStreamer for file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 block BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK], DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]) is bad. 2024-11-16T05:48:41,617 WARN [DataStreamer for file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 block BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK], DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36971,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]) is bad. 2024-11-16T05:48:41,617 WARN [PacketResponder: BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36971] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:50984 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50984 dst: /127.0.0.1:36971 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:41,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-152507242_22 at /127.0.0.1:41216 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41216 dst: /127.0.0.1:36971 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:59700 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59700 dst: /127.0.0.1:37901 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-152507242_22 at /127.0.0.1:40896 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40896 dst: /127.0.0.1:37901 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:59688 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59688 dst: /127.0.0.1:37901 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:50980 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50980 dst: /127.0.0.1:36971 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:41,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b537c13{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:41,621 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a93f88d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:41,622 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:41,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67c5595d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:41,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19254d5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:41,623 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:48:41,623 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T05:48:41,623 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-82948210-172.17.0.2-1731736107907 (Datanode Uuid 1347d7bb-63b5-4749-bd1d-eca9f1149477) service to localhost/127.0.0.1:41893 2024-11-16T05:48:41,623 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:48:41,624 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data3/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:41,624 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data4/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:41,624 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:48:41,637 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:41,640 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:41,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:41,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:41,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:48:41,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@166c77a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:41,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51d0668c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:41,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b4947c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-33891-hadoop-hdfs-3_4_1-tests_jar-_-any-12618134241762112597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:41,736 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e37a2ab{HTTP/1.1, (http/1.1)}{localhost:33891} 2024-11-16T05:48:41,736 INFO [Time-limited test {}] server.Server(415): Started @163434ms 2024-11-16T05:48:41,737 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:48:41,753 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:41,753 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:41,753 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:41,754 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:49568 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49568 dst: /127.0.0.1:37901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T05:48:41,754 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:49564 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49564 dst: /127.0.0.1:37901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,754 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-152507242_22 at /127.0.0.1:49556 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49556 dst: /127.0.0.1:37901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:41,757 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@759e22d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:41,758 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f6bebc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:41,758 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:41,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@302c0dd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:41,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60326694{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:41,759 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:48:41,759 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-82948210-172.17.0.2-1731736107907 (Datanode Uuid 600a9378-0940-4496-bda0-110092f00c86) service to localhost/127.0.0.1:41893 2024-11-16T05:48:41,759 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
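The `Stopped ServerConnector` / `Ending block pool service` entries here are the test tearing a datanode down before bringing a replacement back up (the later `Data Nodes restarted` message). A minimal, hypothetical sketch of that restart dance against a `MiniDFSCluster` follows; `stopDataNode`/`restartDataNode` reflect the usual Hadoop test API, but exact builder options and signatures can vary by Hadoop version, and nothing below is taken from TestLogRolling itself.

```java
// Hypothetical sketch: bounce one datanode of a MiniDFSCluster the way the
// surrounding log suggests the harness does. Not taken from TestLogRolling.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class DataNodeBounceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.create(new Path("/wal-like-file")).close();

      // Stop datanode 0, keeping its storage, then bring it back on the same port.
      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
      cluster.restartDataNode(dn, true);
      cluster.waitActive(); // wait for the restarted datanode to re-register
    } finally {
      cluster.shutdown();
    }
  }
}
```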
2024-11-16T05:48:41,759 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:48:41,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data1/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:41,760 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data2/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:41,760 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:48:41,768 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:41,771 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:41,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:41,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:41,771 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:48:41,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f9d61c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:41,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64395bd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:41,805 WARN [Thread-1338 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:48:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d3c88ee829e7037 with lease ID 0xf69a32e29d2883d4: from storage DS-d42973f3-5d06-454e-9173-095fed398d89 node DatanodeRegistration(127.0.0.1:34425, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=35445, infoSecurePort=0, ipcPort=38503, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T05:48:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d3c88ee829e7037 with lease ID 0xf69a32e29d2883d4: from storage DS-bd2b6539-0c38-4c5a-87bf-d616e2c2afec node DatanodeRegistration(127.0.0.1:34425, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=35445, infoSecurePort=0, ipcPort=38503, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:41,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28e7c97f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-37871-hadoop-hdfs-3_4_1-tests_jar-_-any-946572202297513364/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:41,867 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3393bacb{HTTP/1.1, (http/1.1)}{localhost:37871} 2024-11-16T05:48:41,867 INFO [Time-limited test {}] server.Server(415): Started @163565ms 2024-11-16T05:48:41,868 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:48:41,942 WARN [Thread-1369 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:48:41,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9d6dd3b4e5ad5b2 with lease ID 0xf69a32e29d2883d5: from storage DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3 node DatanodeRegistration(127.0.0.1:41017, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=46745, infoSecurePort=0, ipcPort=43629, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:41,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9d6dd3b4e5ad5b2 with lease ID 0xf69a32e29d2883d5: from storage DS-4f04841f-9be5-4dbe-b91b-a852dfdbff33 node DatanodeRegistration(127.0.0.1:41017, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=46745, infoSecurePort=0, ipcPort=43629, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:42,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:48:42,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:42,886 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-16T05:48:42,891 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-16T05:48:42,893 ERROR [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
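When the append fails with `All datanodes ... are bad. Aborting...`, the old WAL file is left open on HDFS and its lease has to be recovered before the file can be read or archived (the `Recover lease on dfs file` / `Lease recovery is in progress` lines further down). The snippet below is a generic illustration of that pattern using the public `DistributedFileSystem` API; the timeout and polling interval are arbitrary assumptions, and HBase's own `RecoverLeaseFSUtils` adds more logic than this.

```java
// Illustrative only: force lease recovery on an HDFS file and wait until the
// namenode reports it closed. Timeout and poll interval are arbitrary assumptions.
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {

  public static boolean recoverAndWait(DistributedFileSystem dfs, Path file, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // recoverLease() returns true if the file was already closed or recovery
    // completed immediately; otherwise keep polling isFileClosed().
    boolean recovered = dfs.recoverLease(file);
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      recovered = dfs.isFileClosed(file);
    }
    return recovered;
  }
}
```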
2024-11-16T05:48:42,893 WARN [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:42,893 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C32939%2C1731736108541:(num 1731736108924) roll requested 2024-11-16T05:48:42,894 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.1731736122893 2024-11-16T05:48:42,902 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 newFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 2024-11-16T05:48:42,902 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:42,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:42,903 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:42,903 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:42,903 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:42,903 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 2024-11-16T05:48:42,904 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
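The roll requested here by `AbstractWALRoller` is the automatic reaction to the failed append; the same roll can also be requested explicitly through the client API. A brief, hypothetical example using `Admin.rollWALWriter` (the server name below is a placeholder, not one from this log):

```java
// Hypothetical example: ask a specific region server to roll its WAL.
// The server name is a placeholder; supply a real "host,port,startcode".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      ServerName server = ServerName.valueOf("regionserver.example.com,16020,1700000000000");
      admin.rollWALWriter(server); // forces the region server to start a new WAL file
    }
  }
}
```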
2024-11-16T05:48:42,904 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:42,904 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 2024-11-16T05:48:42,904 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46745:46745),(127.0.0.1/127.0.0.1:35445:35445)] 2024-11-16T05:48:42,905 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 is not closed yet, will try archiving it next time 2024-11-16T05:48:42,905 WARN [IPC Server handler 0 on default port 41893 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-16T05:48:42,905 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 after 1ms 2024-11-16T05:48:43,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:43,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:48:43,809 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T05:48:44,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:44,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:44,909 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-16T05:48:45,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:45,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:46,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:46,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:46,906 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 after 4002ms 2024-11-16T05:48:46,913 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:46,914 WARN [DataStreamer for file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 block BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41017,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK], DatanodeInfoWithStorage[127.0.0.1:34425,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41017,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]) is bad. 
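[Editor's note] The RecoverLeaseFSUtils messages above ("Failed to recover lease, attempt=0 ... after 1ms", then "Recovered lease, attempt=1 ... after 4002ms") come from a recover-and-poll loop against the NameNode. The following is a minimal, hypothetical sketch of such a loop written directly against the public DistributedFileSystem API (recoverLease/isFileClosed); it is not the actual RecoverLeaseFSUtils code, which reaches isFileClosed via reflection, which is why the log shows InvocationTargetException wrappers around the "Filesystem closed" failures.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal lease-recovery sketch, assuming direct access to DistributedFileSystem.
public final class LeaseRecoverySketch {

  public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    int attempt = 0;
    while (true) {
      // Ask the NameNode to start (or finish) lease recovery; returns true
      // once the file is closed and safe to read.
      boolean recovered = dfs.recoverLease(wal);
      long elapsed = System.currentTimeMillis() - start;
      if (recovered) {
        System.out.println("Recovered lease, attempt=" + attempt
            + " on file=" + wal + " after " + elapsed + "ms");
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt
          + " on file=" + wal + " after " + elapsed + "ms");
      if (elapsed > timeoutMs) {
        return false;
      }
      // Cheap check between attempts in case recovery completed in the
      // background; this is the call that throws "Filesystem closed" in the
      // log once the owning DFSClient has been shut down.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      attempt++;
      Thread.sleep(pauseMs);
    }
  }
}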
2024-11-16T05:48:46,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:39670 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41017:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39670 dst: /127.0.0.1:41017 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:46,915 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:46170 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34425:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46170 dst: /127.0.0.1:34425 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:46,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28e7c97f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:46,917 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3393bacb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:46,917 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:46,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64395bd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:46,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f9d61c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:46,919 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:48:46,919 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:48:46,919 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:48:46,919 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-82948210-172.17.0.2-1731736107907 (Datanode Uuid 600a9378-0940-4496-bda0-110092f00c86) service to localhost/127.0.0.1:41893 2024-11-16T05:48:46,921 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data1/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:46,921 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data2/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:46,921 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:48:46,935 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:46,940 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:46,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:46,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:46,941 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:48:46,941 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12c0bc10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:46,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@173341ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:47,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@2d89661e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-43705-hadoop-hdfs-3_4_1-tests_jar-_-any-16360827419064644868/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:47,076 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22699b58{HTTP/1.1, (http/1.1)}{localhost:43705} 2024-11-16T05:48:47,076 INFO [Time-limited test {}] server.Server(415): Started @168774ms 2024-11-16T05:48:47,077 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:48:47,098 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:47,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_267000008_22 at /127.0.0.1:46190 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34425:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46190 dst: /127.0.0.1:34425 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:47,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b4947c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:47,105 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e37a2ab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:48:47,105 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:48:47,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51d0668c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:48:47,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@166c77a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:48:47,106 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:48:47,106 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
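[Editor's note] The Jetty "Stopped"/"Started" pairs and the interrupted BPServiceActor and refreshUsed threads in this stretch are the test harness bouncing the embedded MiniDFSCluster DataNodes, confirmed further down by "TestLogRolling(389): Data Nodes restarted". A hypothetical sketch of that step follows; restartDataNodes() and waitActive() are the MiniDFSCluster test methods as best recalled, so treat the exact names as assumptions rather than a quote of the test code.

import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch of "restart the DataNodes under a live WAL".
// Reusing the same storage directories means existing block files (and their
// generation stamps) survive, which is what later produces the
// "GenerationStamp not matched" delete failures seen in this log.
public final class DataNodeBounceSketch {
  public static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
    // Restart every DataNode in place; assumed MiniDFSCluster API.
    cluster.restartDataNodes();
    // Wait until the restarted DataNodes have re-registered with the NameNode.
    cluster.waitActive();
  }
}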
2024-11-16T05:48:47,106 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-82948210-172.17.0.2-1731736107907 (Datanode Uuid 1347d7bb-63b5-4749-bd1d-eca9f1149477) service to localhost/127.0.0.1:41893 2024-11-16T05:48:47,106 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:48:47,107 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data3/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:47,107 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data4/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:48:47,107 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:48:47,122 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:48:47,125 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:48:47,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:48:47,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:48:47,128 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:48:47,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c6927a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:48:47,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31767992{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:48:47,152 WARN [Thread-1412 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:48:47,154 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x664d5a9fa10c1647 with lease ID 0xf69a32e29d2883d6: from storage DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3 node DatanodeRegistration(127.0.0.1:42933, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=43695, infoSecurePort=0, ipcPort=43925, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:47,155 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x664d5a9fa10c1647 with lease ID 0xf69a32e29d2883d6: from storage DS-4f04841f-9be5-4dbe-b91b-a852dfdbff33 node DatanodeRegistration(127.0.0.1:42933, datanodeUuid=600a9378-0940-4496-bda0-110092f00c86, infoPort=43695, infoSecurePort=0, ipcPort=43925, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:47,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47d31af2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/java.io.tmpdir/jetty-localhost-33907-hadoop-hdfs-3_4_1-tests_jar-_-any-1150871525853755609/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:48:47,230 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@511ae001{HTTP/1.1, (http/1.1)}{localhost:33907} 2024-11-16T05:48:47,230 INFO [Time-limited test {}] server.Server(415): Started @168928ms 2024-11-16T05:48:47,232 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:48:47,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:47,296 WARN [Thread-1443 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:48:47,299 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5984fd4de64adf0e with lease ID 0xf69a32e29d2883d7: from storage DS-d42973f3-5d06-454e-9173-095fed398d89 node DatanodeRegistration(127.0.0.1:36293, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=34965, infoSecurePort=0, ipcPort=41697, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:47,299 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5984fd4de64adf0e with lease ID 0xf69a32e29d2883d7: from storage DS-bd2b6539-0c38-4c5a-87bf-d616e2c2afec node DatanodeRegistration(127.0.0.1:36293, datanodeUuid=1347d7bb-63b5-4749-bd1d-eca9f1149477, infoPort=34965, infoSecurePort=0, ipcPort=41697, storageInfo=lv=-57;cid=testClusterID;nsid=938027457;c=1731736107907), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:48:47,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:48,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:48,250 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-16T05:48:48,254 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-16T05:48:48,256 ERROR [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34425,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:48,257 WARN [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34425,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:48,257 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C32939%2C1731736108541:(num 1731736122893) roll requested 2024-11-16T05:48:48,257 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.1731736128257 2024-11-16T05:48:48,265 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 newFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 2024-11-16T05:48:48,266 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:48,266 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:48,266 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:48,266 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:48,266 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:48,267 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 2024-11-16T05:48:48,267 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34425,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:48,267 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34425,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:48,267 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 2024-11-16T05:48:48,268 WARN [IPC Server handler 0 on default port 41893 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-16T05:48:48,269 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 after 2ms 2024-11-16T05:48:48,269 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:43695:43695)] 2024-11-16T05:48:48,269 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 is not closed yet, will try archiving it next time 2024-11-16T05:48:48,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:49,156 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
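[Editor's note] The sequence at 05:48:48 above (appendAndSync throws "All datanodes ... are bad", the roller logs "roll requested", a new writer comes up on a fresh pipeline, and closing the old writer falls back to lease recovery) is the roll-on-error path. The sketch below is an illustrative reduction of that pattern, not the actual AbstractFSWAL implementation; the WalWriter interface and helper names are invented for the example.

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative roll-on-error pattern: if a sync fails, stop writing to the
// current WAL file, open a new one, and close the broken writer
// asynchronously, falling back to lease recovery when the close itself fails.
public final class RollOnErrorSketch {

  /** Minimal stand-in for a WAL writer; invented for this sketch. */
  interface WalWriter {
    void append(byte[] entry) throws IOException;
    void sync() throws IOException;
    void close() throws IOException;
  }

  /** Invented factory hook for creating a writer on a new pipeline. */
  interface WalWriterFactory {
    WalWriter createWriter() throws IOException;
  }

  private final ExecutorService closeExecutor =
      Executors.newSingleThreadExecutor();   // mirrors the "Close-WAL-Writer-0" thread
  private volatile WalWriter current;

  RollOnErrorSketch(WalWriter initial) {
    this.current = initial;
  }

  void appendAndSync(byte[] entry, WalWriterFactory factory) throws IOException {
    try {
      current.append(entry);
      current.sync();
    } catch (IOException e) {
      // "append entry failed" / "roll requested": swap in a writer on a fresh
      // pipeline before surfacing the failure to the caller.
      WalWriter broken = current;
      current = factory.createWriter();
      closeExecutor.execute(() -> {
        try {
          broken.close();                     // may fail: "close old writer failed."
        } catch (IOException closeError) {
          recoverLeaseQuietly();              // fall back to NameNode lease recovery
        }
      });
      throw e;
    }
  }

  private void recoverLeaseQuietly() {
    // Placeholder: in the real system this is RecoverLeaseFSUtils.recoverFileLease.
  }
}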
2024-11-16T05:48:49,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:49,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:50,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T05:48:50,271 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.1731736130270
2024-11-16T05:48:50,282 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 newFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270
2024-11-16T05:48:50,282 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:48:50,282 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:48:50,282 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:48:50,282 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:48:50,283 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:48:50,283 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270
2024-11-16T05:48:50,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:43695:43695)]
2024-11-16T05:48:50,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 is not closed yet, will try archiving it next time
2024-11-16T05:48:50,284 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 is not closed yet, will try archiving it next time
2024-11-16T05:48:50,284 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924
2024-11-16T05:48:50,284 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924
2024-11-16T05:48:50,284 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924 after 0ms
2024-11-16T05:48:50,284 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924
2024-11-16T05:48:50,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741838_1019 (size=1264)
2024-11-16T05:48:50,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741838_1019 (size=1264)
2024-11-16T05:48:50,285 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 is not closed yet, will try archiving it next time
2024-11-16T05:48:50,294 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731736109863/Put/vlen=218/seqid=0]
2024-11-16T05:48:50,294 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731736119611/Put/vlen=1045/seqid=0]
2024-11-16T05:48:50,294 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736108924
2024-11-16T05:48:50,294 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893
2024-11-16T05:48:50,294 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893
2024-11-16T05:48:50,294 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 after 0ms
2024-11-16T05:48:50,294 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893
2024-11-16T05:48:50,297 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731736122892/Put/vlen=1045/seqid=0]
2024-11-16T05:48:50,298 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731736124911/Put/vlen=1045/seqid=0]
2024-11-16T05:48:50,298 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893
2024-11-16T05:48:50,298 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257
2024-11-16T05:48:50,298 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257
2024-11-16T05:48:50,298 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 after 0ms
2024-11-16T05:48:50,298 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL
/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736128257 2024-11-16T05:48:50,301 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731736128256/Put/vlen=1045/seqid=0] 2024-11-16T05:48:50,301 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 2024-11-16T05:48:50,301 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 2024-11-16T05:48:50,301 WARN [IPC Server handler 2 on default port 41893 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-16T05:48:50,302 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 after 1ms 2024-11-16T05:48:50,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:51,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:51,299 WARN [ResponseProcessor for block BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
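The "Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 1ms" and later "Recovered lease, attempt=1 ... after 4003ms" lines come from a retry loop around HDFS lease recovery: the client asks the NameNode to recover the lease, and while block recovery (the RecoveryId noted by the NameNode above) is still in progress the file stays open, so the caller waits and asks again. The following is a minimal sketch of such a loop built on the real DistributedFileSystem.recoverLease() and isFileClosed() calls; it is not the RecoverLeaseFSUtils code itself, and the LeaseRecoveryLoop class, the timeout and pause values, and the command-line path argument are assumptions made for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryLoop {

      // Keep asking the NameNode to recover the lease until the file is closed
      // or the timeout expires; prints lines shaped like the ones in the log.
      static boolean recoverLease(DistributedFileSystem dfs, Path file,
          long timeoutMs, long pauseMs) throws Exception {
        long start = System.currentTimeMillis();
        for (int attempt = 0; System.currentTimeMillis() - start < timeoutMs; attempt++) {
          // recoverLease() returns true once the NameNode has closed the file;
          // isFileClosed() covers the case where recovery finished in between.
          boolean closed = dfs.recoverLease(file) || dfs.isFileClosed(file);
          long elapsedMs = System.currentTimeMillis() - start;
          if (closed) {
            System.out.printf("Recovered lease, attempt=%d on file=%s after %dms%n",
                attempt, file, elapsedMs);
            return true;
          }
          System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
              attempt, file, elapsedMs);
          Thread.sleep(pauseMs); // let block recovery (the RecoveryId above) make progress
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]); // an hdfs:// WAL path such as the one in the log
        try (DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf)) {
          recoverLease(dfs, wal, 60_000L, 1_000L);
        }
      }
    }

Pointed at a WAL file like the hdfs://localhost:41893/user/jenkins/test-data/... path in the log, the loop prints the same attempt/elapsed pattern seen above, succeeding once the NameNode finishes block recovery.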
2024-11-16T05:48:51,299 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-152507242_22 at /127.0.0.1:60006 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60006 dst: /127.0.0.1:36293 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36293 remote=/127.0.0.1:60006]. Total timeout mills is 60000, 58982 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:51,299 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-152507242_22 at /127.0.0.1:50192 [Receiving block BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42933:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50192 dst: /127.0.0.1:42933 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:48:51,300 WARN [DataStreamer for file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 block BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36293,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK], DatanodeInfoWithStorage[127.0.0.1:42933,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36293,DS-d42973f3-5d06-454e-9173-095fed398d89,DISK]) is bad. 2024-11-16T05:48:51,301 WARN [DataStreamer for file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 block BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:51,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741839_1022 (size=85) 2024-11-16T05:48:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741839_1022 (size=85) 2024-11-16T05:48:51,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:52,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:52,270 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736122893 after 4003ms 2024-11-16T05:48:52,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:53,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:53,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:54,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:54,303 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 after 4002ms 2024-11-16T05:48:54,303 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 2024-11-16T05:48:54,310 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 2024-11-16T05:48:54,311 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-16T05:48:54,312 ERROR [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,312 WARN [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,312 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C32939%2C1731736108541.meta:.meta(num 1731736109330) roll requested 2024-11-16T05:48:54,312 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.meta.1731736134312.meta 2024-11-16T05:48:54,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,319 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,319 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,319 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736134312.meta 2024-11-16T05:48:54,319 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,319 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta 2024-11-16T05:48:54,320 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:43695:43695)] 2024-11-16T05:48:54,320 WARN [IPC Server handler 2 on default port 41893 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-16T05:48:54,320 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta is not closed yet, will try archiving it next time 2024-11-16T05:48:54,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta after 0ms 2024-11-16T05:48:54,338 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/info/844bb7d3ae46462d9b913e35ba5d970e is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467./info:regioninfo/1731736109868/Put/seqid=0 2024-11-16T05:48:54,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741841_1025 (size=7125) 2024-11-16T05:48:54,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741841_1025 (size=7125) 2024-11-16T05:48:54,344 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/info/844bb7d3ae46462d9b913e35ba5d970e 2024-11-16T05:48:54,363 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/ns/0c14ccb4c45e47328695197fc18d62a5 is 43, key is default/ns:d/1731736109378/Put/seqid=0 2024-11-16T05:48:54,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42933 is added to blk_1073741842_1026 (size=5153) 2024-11-16T05:48:54,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741842_1026 (size=5153) 2024-11-16T05:48:54,369 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/ns/0c14ccb4c45e47328695197fc18d62a5 2024-11-16T05:48:54,390 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/table/33eb6724b2c945829ee351b3ede57d6c is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731736109879/Put/seqid=0 2024-11-16T05:48:54,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741843_1027 (size=5438) 2024-11-16T05:48:54,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741843_1027 (size=5438) 2024-11-16T05:48:54,395 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/table/33eb6724b2c945829ee351b3ede57d6c 2024-11-16T05:48:54,400 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/info/844bb7d3ae46462d9b913e35ba5d970e as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/info/844bb7d3ae46462d9b913e35ba5d970e 2024-11-16T05:48:54,406 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/info/844bb7d3ae46462d9b913e35ba5d970e, entries=10, sequenceid=11, filesize=7.0 K 2024-11-16T05:48:54,407 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/ns/0c14ccb4c45e47328695197fc18d62a5 as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/ns/0c14ccb4c45e47328695197fc18d62a5 2024-11-16T05:48:54,413 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/ns/0c14ccb4c45e47328695197fc18d62a5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T05:48:54,414 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/.tmp/table/33eb6724b2c945829ee351b3ede57d6c as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/table/33eb6724b2c945829ee351b3ede57d6c 2024-11-16T05:48:54,421 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/table/33eb6724b2c945829ee351b3ede57d6c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T05:48:54,422 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 111ms, sequenceid=11, compaction requested=false 2024-11-16T05:48:54,422 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T05:48:54,422 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 398a98ac1da521fd64df4d7a31ba5467 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T05:48:54,423 ERROR [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,423 WARN [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6-prefix:3456ee6a3164,32939,1731736108541 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,424 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C32939%2C1731736108541:(num 1731736130270) roll requested 2024-11-16T05:48:54,424 INFO [regionserver/3456ee6a3164:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C32939%2C1731736108541.1731736134424 2024-11-16T05:48:54,429 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 newFile=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736134424 2024-11-16T05:48:54,429 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,429 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,429 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,429 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,429 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,429 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736134424 2024-11-16T05:48:54,429 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:54,430 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-82948210-172.17.0.2-1731736107907:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:48:54,430 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 2024-11-16T05:48:54,430 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 after 0ms 2024-11-16T05:48:54,431 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.1731736130270 to hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs/3456ee6a3164%2C32939%2C1731736108541.1731736130270 2024-11-16T05:48:54,431 DEBUG [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:43695:43695)] 2024-11-16T05:48:54,445 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/.tmp/info/26ae51f9de304594be6a9af5f47a184d is 1080, key is row1002/info:/1731736119611/Put/seqid=0 2024-11-16T05:48:54,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741845_1029 (size=9270) 2024-11-16T05:48:54,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741845_1029 (size=9270) 2024-11-16T05:48:54,452 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/.tmp/info/26ae51f9de304594be6a9af5f47a184d 2024-11-16T05:48:54,459 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/.tmp/info/26ae51f9de304594be6a9af5f47a184d as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/info/26ae51f9de304594be6a9af5f47a184d 2024-11-16T05:48:54,466 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/info/26ae51f9de304594be6a9af5f47a184d, entries=4, sequenceid=8, filesize=9.1 K 2024-11-16T05:48:54,467 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 398a98ac1da521fd64df4d7a31ba5467 in 45ms, sequenceid=8, compaction requested=false 2024-11-16T05:48:54,467 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
398a98ac1da521fd64df4d7a31ba5467: 2024-11-16T05:48:54,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:54,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:48:54,472 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T05:48:54,472 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:54,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:54,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:54,473 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T05:48:54,473 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:48:54,473 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1721512708, stopped=false 2024-11-16T05:48:54,473 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,34493,1731736108497 2024-11-16T05:48:54,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:54,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:48:54,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:54,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:54,476 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:48:54,477 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:48:54,477 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:54,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:54,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:54,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:48:54,477 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,32939,1731736108541' ***** 2024-11-16T05:48:54,477 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:48:54,477 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:48:54,478 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(3091): Received CLOSE for 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,32939,1731736108541 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:32939. 
2024-11-16T05:48:54,478 DEBUG [RS:0;3456ee6a3164:32939 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:48:54,478 DEBUG [RS:0;3456ee6a3164:32939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:54,478 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 398a98ac1da521fd64df4d7a31ba5467, disabling compactions & flushes 2024-11-16T05:48:54,478 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T05:48:54,478 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T05:48:54,478 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. after waiting 0 ms 2024-11-16T05:48:54,478 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 
2024-11-16T05:48:54,478 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:48:54,479 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T05:48:54,479 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 398a98ac1da521fd64df4d7a31ba5467=TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467.} 2024-11-16T05:48:54,479 DEBUG [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 398a98ac1da521fd64df4d7a31ba5467 2024-11-16T05:48:54,479 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:48:54,479 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:48:54,479 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:48:54,479 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:48:54,479 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:48:54,483 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/default/TestLogRolling-testLogRollOnPipelineRestart/398a98ac1da521fd64df4d7a31ba5467/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-16T05:48:54,483 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T05:48:54,483 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 
2024-11-16T05:48:54,483 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 398a98ac1da521fd64df4d7a31ba5467: Waiting for close lock at 1731736134478Running coprocessor pre-close hooks at 1731736134478Disabling compacts and flushes for region at 1731736134478Disabling writes for close at 1731736134478Writing region close event to WAL at 1731736134479 (+1 ms)Running coprocessor post-close hooks at 1731736134483 (+4 ms)Closed at 1731736134483 2024-11-16T05:48:54,483 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:48:54,483 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:48:54,483 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736134479Running coprocessor pre-close hooks at 1731736134479Disabling compacts and flushes for region at 1731736134479Disabling writes for close at 1731736134479Writing region close event to WAL at 1731736134480 (+1 ms)Running coprocessor post-close hooks at 1731736134483 (+3 ms)Closed at 1731736134483 2024-11-16T05:48:54,483 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731736109489.398a98ac1da521fd64df4d7a31ba5467. 2024-11-16T05:48:54,484 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:48:54,679 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,32939,1731736108541; all regions closed. 2024-11-16T05:48:54,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:54,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741840_1023 (size=825) 2024-11-16T05:48:54,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741840_1023 (size=825) 2024-11-16T05:48:54,793 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:48:54,820 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T05:48:54,820 INFO [regionserver/3456ee6a3164:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T05:48:55,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:55,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:56,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:56,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:57,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:57,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:58,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:58,300 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-16T05:48:58,322 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta after 4002ms 2024-11-16T05:48:58,323 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/WALs/3456ee6a3164,32939,1731736108541/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta to hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs/3456ee6a3164%2C32939%2C1731736108541.meta.1731736109330.meta 2024-11-16T05:48:58,332 DEBUG [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs 2024-11-16T05:48:58,332 INFO [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C32939%2C1731736108541.meta:.meta(num 1731736134312) 2024-11-16T05:48:58,333 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,333 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,333 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,334 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,334 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741844_1028 (size=1162) 2024-11-16T05:48:58,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741844_1028 (size=1162) 2024-11-16T05:48:58,341 DEBUG [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs 2024-11-16T05:48:58,341 INFO [RS:0;3456ee6a3164:32939 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C32939%2C1731736108541:(num 1731736134424) 2024-11-16T05:48:58,341 DEBUG [RS:0;3456ee6a3164:32939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:48:58,341 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:48:58,342 INFO [RS:0;3456ee6a3164:32939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:48:58,342 INFO [RS:0;3456ee6a3164:32939 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T05:48:58,342 INFO [RS:0;3456ee6a3164:32939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:48:58,342 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T05:48:58,342 INFO [RS:0;3456ee6a3164:32939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32939 2024-11-16T05:48:58,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,32939,1731736108541 2024-11-16T05:48:58,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:48:58,345 INFO [RS:0;3456ee6a3164:32939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:48:58,347 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,32939,1731736108541] 2024-11-16T05:48:58,348 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,32939,1731736108541 already deleted, retry=false 2024-11-16T05:48:58,349 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,32939,1731736108541 expired; onlineServers=0 2024-11-16T05:48:58,349 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,34493,1731736108497' ***** 2024-11-16T05:48:58,349 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:48:58,349 INFO [M:0;3456ee6a3164:34493 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:48:58,349 INFO [M:0;3456ee6a3164:34493 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:48:58,349 DEBUG [M:0;3456ee6a3164:34493 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:48:58,349 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T05:48:58,349 DEBUG [M:0;3456ee6a3164:34493 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:48:58,349 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736108711 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736108711,5,FailOnTimeoutGroup] 2024-11-16T05:48:58,349 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736108711 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736108711,5,FailOnTimeoutGroup] 2024-11-16T05:48:58,350 INFO [M:0;3456ee6a3164:34493 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:48:58,350 INFO [M:0;3456ee6a3164:34493 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:48:58,350 DEBUG [M:0;3456ee6a3164:34493 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:48:58,350 INFO [M:0;3456ee6a3164:34493 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:48:58,350 INFO [M:0;3456ee6a3164:34493 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:48:58,350 INFO [M:0;3456ee6a3164:34493 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:48:58,350 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:48:58,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:48:58,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:48:58,351 DEBUG [M:0;3456ee6a3164:34493 {}] zookeeper.ZKUtil(347): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T05:48:58,351 WARN [M:0;3456ee6a3164:34493 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T05:48:58,352 INFO [M:0;3456ee6a3164:34493 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/.lastflushedseqids 2024-11-16T05:48:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741846_1030 (size=139) 2024-11-16T05:48:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741846_1030 (size=139) 2024-11-16T05:48:58,357 INFO [M:0;3456ee6a3164:34493 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:48:58,357 INFO [M:0;3456ee6a3164:34493 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:48:58,358 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:48:58,358 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:58,358 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:58,358 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:48:58,358 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:58,358 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-16T05:48:58,358 ERROR [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData-prefix:3456ee6a3164,34493,1731736108497 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:58,358 WARN [FSHLog-0-hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData-prefix:3456ee6a3164,34493,1731736108497 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:48:58,358 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3456ee6a3164%2C34493%2C1731736108497:(num 1731736108629) roll requested 2024-11-16T05:48:58,359 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C34493%2C1731736108497.1731736138359 2024-11-16T05:48:58,363 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,363 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,363 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,363 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,363 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,364 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736138359 2024-11-16T05:48:58,364 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T05:48:58,364 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37901,DS-de00379a-4ac6-459e-ab7b-66b3e2af21f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T05:48:58,364 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 2024-11-16T05:48:58,364 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43695:43695),(127.0.0.1/127.0.0.1:34965:34965)] 2024-11-16T05:48:58,364 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 is not closed yet, will try archiving it next time 2024-11-16T05:48:58,365 WARN [IPC Server handler 4 on default port 41893 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-16T05:48:58,365 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 after 1ms 2024-11-16T05:48:58,379 DEBUG [M:0;3456ee6a3164:34493 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fbcf1928bd73452cbb91e53da5c80217 is 82, key is hbase:meta,,1/info:regioninfo/1731736109362/Put/seqid=0 2024-11-16T05:48:58,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741848_1033 (size=5672) 2024-11-16T05:48:58,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741848_1033 (size=5672) 2024-11-16T05:48:58,384 INFO [M:0;3456ee6a3164:34493 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fbcf1928bd73452cbb91e53da5c80217 2024-11-16T05:48:58,402 DEBUG [M:0;3456ee6a3164:34493 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d4c0bdc59bb04198a2243de2a1cb3487 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731736109883/Put/seqid=0 2024-11-16T05:48:58,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741849_1034 (size=6118) 2024-11-16T05:48:58,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741849_1034 (size=6118) 2024-11-16T05:48:58,407 INFO [M:0;3456ee6a3164:34493 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d4c0bdc59bb04198a2243de2a1cb3487 2024-11-16T05:48:58,424 DEBUG [M:0;3456ee6a3164:34493 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/675d6c4ab48445c5bc58f789cc25b6a5 is 69, key is 3456ee6a3164,32939,1731736108541/rs:state/1731736108779/Put/seqid=0 2024-11-16T05:48:58,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741850_1035 (size=5156) 2024-11-16T05:48:58,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741850_1035 (size=5156) 2024-11-16T05:48:58,429 INFO [M:0;3456ee6a3164:34493 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/675d6c4ab48445c5bc58f789cc25b6a5 2024-11-16T05:48:58,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:58,447 INFO [RS:0;3456ee6a3164:32939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:48:58,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32939-0x10047135a2e0001, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:48:58,447 INFO [RS:0;3456ee6a3164:32939 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,32939,1731736108541; zookeeper connection closed. 
2024-11-16T05:48:58,447 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e332f91 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e332f91 2024-11-16T05:48:58,447 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T05:48:58,448 DEBUG [M:0;3456ee6a3164:34493 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0be6bf92cdc448dc8a10a904cae37f75 is 52, key is load_balancer_on/state:d/1731736109482/Put/seqid=0 2024-11-16T05:48:58,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741851_1036 (size=5056) 2024-11-16T05:48:58,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741851_1036 (size=5056) 2024-11-16T05:48:58,453 INFO [M:0;3456ee6a3164:34493 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0be6bf92cdc448dc8a10a904cae37f75 2024-11-16T05:48:58,459 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fbcf1928bd73452cbb91e53da5c80217 as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fbcf1928bd73452cbb91e53da5c80217 2024-11-16T05:48:58,463 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fbcf1928bd73452cbb91e53da5c80217, entries=8, sequenceid=56, filesize=5.5 K 2024-11-16T05:48:58,464 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d4c0bdc59bb04198a2243de2a1cb3487 as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d4c0bdc59bb04198a2243de2a1cb3487 2024-11-16T05:48:58,470 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d4c0bdc59bb04198a2243de2a1cb3487, entries=6, sequenceid=56, filesize=6.0 K 2024-11-16T05:48:58,471 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/675d6c4ab48445c5bc58f789cc25b6a5 as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/675d6c4ab48445c5bc58f789cc25b6a5 
2024-11-16T05:48:58,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:58,475 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-16T05:48:58,475 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/675d6c4ab48445c5bc58f789cc25b6a5, entries=1, sequenceid=56, filesize=5.0 K 2024-11-16T05:48:58,477 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0be6bf92cdc448dc8a10a904cae37f75 as hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0be6bf92cdc448dc8a10a904cae37f75 2024-11-16T05:48:58,481 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0be6bf92cdc448dc8a10a904cae37f75, entries=1, sequenceid=56, filesize=4.9 K 2024-11-16T05:48:58,482 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=56, compaction requested=false 2024-11-16T05:48:58,484 INFO [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:48:58,484 DEBUG [M:0;3456ee6a3164:34493 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736138358Disabling compacts and flushes for region at 1731736138358Disabling writes for close at 1731736138358Obtaining lock to block concurrent updates at 1731736138358Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736138358Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731736138358Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731736138365 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736138365Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736138378 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736138378Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736138389 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736138401 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736138401Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736138411 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736138424 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736138424Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736138433 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736138447 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736138447Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@248dcf60: reopening flushed file at 1731736138458 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@558f9498: reopening flushed file at 1731736138463 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17e52846: reopening flushed file at 1731736138470 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a658d43: reopening flushed file at 1731736138476 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=56, compaction requested=false at 1731736138482 (+6 ms)Writing region close event to WAL at 1731736138484 (+2 ms)Closed at 1731736138484 2024-11-16T05:48:58,484 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,484 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,484 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,484 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,484 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:48:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36293 is added to blk_1073741847_1031 (size=757) 2024-11-16T05:48:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42933 is added to blk_1073741847_1031 (size=757) 2024-11-16T05:48:59,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:59,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:48:59,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:48:59,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,022 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:49:00,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,025 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:00,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:00,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:01,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:01,299 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T05:49:01,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:02,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:02,366 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 after 4002ms 2024-11-16T05:49:02,368 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/WALs/3456ee6a3164,34493,1731736108497/3456ee6a3164%2C34493%2C1731736108497.1731736108629 to hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/oldWALs/3456ee6a3164%2C34493%2C1731736108497.1731736108629 2024-11-16T05:49:02,376 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/MasterData/oldWALs/3456ee6a3164%2C34493%2C1731736108497.1731736108629 to hdfs://localhost:41893/user/jenkins/test-data/2e49df8e-6e07-d26e-44dd-4b3931c5e6b6/oldWALs/3456ee6a3164%2C34493%2C1731736108497.1731736108629$masterlocalwal$ 2024-11-16T05:49:02,376 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
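The run of "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above is the WAL close path polling HDFS for lease recovery on WAL files under hdfs://localhost:36821 through a DFSClient that has already been closed, so the isFileClosed probe itself throws; the "Recovered lease, attempt=1 ... after 4002ms" line shows the same poll succeeding against the still-running cluster on port 41893. Purely as an illustration of that polling pattern (this is not the actual RecoverLeaseFSUtils code; the class name, timeout and sleep interval below are assumptions), a minimal sketch against the public DistributedFileSystem API could look like:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Illustrative sketch of a lease-recovery poll; not the HBase RecoverLeaseFSUtils implementation. */
public final class LeaseRecoverySketch {

  /**
   * Asks the NameNode to recover the lease on the given path and polls isFileClosed()
   * until the file is closed or the (assumed) timeout elapses.
   */
  public static boolean recoverLease(FileSystem fs, Path path, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(path)) {
        return true; // NameNode reports the file is closed
      }
      if (dfs.isFileClosed(path)) {
        return true; // this probe is what fails with "Filesystem closed" in the log above
      }
      Thread.sleep(1000L); // assumed poll interval
    }
    return false;
  }
}

Once the underlying DFSClient is closed, both calls throw IOException instead of returning, which matches the failure mode logged above (surfaced there as an InvocationTargetException because the probe is invoked reflectively).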
2024-11-16T05:49:02,376 INFO [M:0;3456ee6a3164:34493 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T05:49:02,377 INFO [M:0;3456ee6a3164:34493 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34493 2024-11-16T05:49:02,377 INFO [M:0;3456ee6a3164:34493 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:49:02,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:02,480 INFO [M:0;3456ee6a3164:34493 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:49:02,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:49:02,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34493-0x10047135a2e0000, quorum=127.0.0.1:61239, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:49:02,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47d31af2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:02,485 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@511ae001{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:49:02,485 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:49:02,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31767992{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:49:02,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c6927a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:49:02,487 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:49:02,487 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:49:02,487 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:49:02,487 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-82948210-172.17.0.2-1731736107907 (Datanode Uuid 1347d7bb-63b5-4749-bd1d-eca9f1149477) service to localhost/127.0.0.1:41893 2024-11-16T05:49:02,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data3/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:02,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data4/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:02,488 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:49:02,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d89661e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:02,490 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22699b58{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:49:02,490 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:49:02,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@173341ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:49:02,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12c0bc10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:49:02,492 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:49:02,492 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:49:02,492 WARN [BP-82948210-172.17.0.2-1731736107907 heartbeating to localhost/127.0.0.1:41893 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-82948210-172.17.0.2-1731736107907 (Datanode Uuid 600a9378-0940-4496-bda0-110092f00c86) service to localhost/127.0.0.1:41893 2024-11-16T05:49:02,492 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:49:02,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data1/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:02,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/cluster_daf90692-0304-c672-230d-648cdecaa0bf/data/data2/current/BP-82948210-172.17.0.2-1731736107907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:02,494 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:49:02,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24f1fcf5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:49:02,504 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35d68916{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:49:02,504 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:49:02,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49217387{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:49:02,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4778755b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir/,STOPPED} 2024-11-16T05:49:02,511 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:49:02,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:49:02,536 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 155) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:41893 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41893 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41893 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41893 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41893 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:41893 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41893 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:41893 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=94 (was 86) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2889 (was 3234) 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=94, ProcessCount=11, AvailableMemoryMB=2889 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.log.dir so I do NOT create it in target/test-data/74c0971c-dadc-28f0-4427-662df9703079 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fc1a1824-9dcc-3503-7090-6794e60c2ec0/hadoop.tmp.dir so I do NOT create it in target/test-data/74c0971c-dadc-28f0-4427-662df9703079 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400, deleteOnExit=true 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:49:02,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/test.cache.data in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:49:02,544 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:49:02,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:49:02,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:49:02,557 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:49:02,601 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:02,604 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:49:02,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:49:02,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:49:02,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:49:02,606 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:02,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70358ec4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:49:02,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45ec22ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:49:02,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1aa7e2ef{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/java.io.tmpdir/jetty-localhost-44927-hadoop-hdfs-3_4_1-tests_jar-_-any-11155103834906317964/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:49:02,700 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ed05334{HTTP/1.1, (http/1.1)}{localhost:44927} 2024-11-16T05:49:02,700 INFO [Time-limited test {}] server.Server(415): Started @184399ms 2024-11-16T05:49:02,711 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:49:02,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:02,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:49:02,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:49:02,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:49:02,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:49:02,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60563caf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:49:02,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b792597{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:49:02,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7331cedc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/java.io.tmpdir/jetty-localhost-43113-hadoop-hdfs-3_4_1-tests_jar-_-any-6420716077691801121/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:02,846 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1aa3c43{HTTP/1.1, (http/1.1)}{localhost:43113} 2024-11-16T05:49:02,846 INFO [Time-limited test {}] server.Server(415): Started @184545ms 2024-11-16T05:49:02,847 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:49:02,870 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:02,873 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:49:02,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:49:02,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:49:02,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:49:02,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4eefceda{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:49:02,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57887c0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:49:02,907 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data1/current/BP-1011988408-172.17.0.2-1731736142567/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:02,908 WARN [Thread-1638 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data2/current/BP-1011988408-172.17.0.2-1731736142567/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:02,922 WARN [Thread-1616 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:49:02,925 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x819f62ffd182a611 with lease ID 0xe446bc2bac8ee2d9: Processing first storage report for DS-4bd84047-0ddf-4371-a61f-36b6e6e1d2b8 from datanode DatanodeRegistration(127.0.0.1:38631, datanodeUuid=3368ae5b-0160-440d-8c69-4e759bdc1c27, infoPort=41693, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567) 2024-11-16T05:49:02,925 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x819f62ffd182a611 with lease ID 0xe446bc2bac8ee2d9: from storage DS-4bd84047-0ddf-4371-a61f-36b6e6e1d2b8 node DatanodeRegistration(127.0.0.1:38631, datanodeUuid=3368ae5b-0160-440d-8c69-4e759bdc1c27, infoPort=41693, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:02,925 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x819f62ffd182a611 with lease ID 0xe446bc2bac8ee2d9: Processing first storage report for DS-deab0ddf-1c85-4138-b2e8-94d2592ec81a from datanode DatanodeRegistration(127.0.0.1:38631, datanodeUuid=3368ae5b-0160-440d-8c69-4e759bdc1c27, infoPort=41693, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567) 2024-11-16T05:49:02,925 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x819f62ffd182a611 with lease ID 0xe446bc2bac8ee2d9: from storage DS-deab0ddf-1c85-4138-b2e8-94d2592ec81a node DatanodeRegistration(127.0.0.1:38631, datanodeUuid=3368ae5b-0160-440d-8c69-4e759bdc1c27, infoPort=41693, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:02,980 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5afe5563{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/java.io.tmpdir/jetty-localhost-42983-hadoop-hdfs-3_4_1-tests_jar-_-any-14264966107114897467/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:02,980 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e9e090d{HTTP/1.1, (http/1.1)}{localhost:42983} 2024-11-16T05:49:02,980 INFO [Time-limited test {}] server.Server(415): Started @184678ms 2024-11-16T05:49:02,981 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
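The startup that begins at 05:49:02,543 ("Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numRegionServers=1, ..., numDataNodes=2, ..., numZkServers=1, ...}") is HBaseTestingUtil bringing up fresh ZooKeeper, DFS and HBase miniclusters for the next test method. For orientation only — assuming the builder-style StartMiniClusterOption API, and with an invented class name — the equivalent test-side call is roughly:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

/** Illustrative sketch of the test-side startup behind the log lines above; names are assumed. */
public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option string in the log: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // ... test body runs against util.getConnection() ...
    } finally {
      // Produces the "Shutdown MiniZK cluster" / "Minicluster is down" lines seen earlier
      // for the previous test.
      util.shutdownMiniCluster();
    }
  }
}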
2024-11-16T05:49:03,043 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data3/current/BP-1011988408-172.17.0.2-1731736142567/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:03,043 WARN [Thread-1664 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data4/current/BP-1011988408-172.17.0.2-1731736142567/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:03,058 WARN [Thread-1652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:49:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4323386e0e2279e with lease ID 0xe446bc2bac8ee2da: Processing first storage report for DS-d1bd78c5-6881-4914-b193-a5d024be40b1 from datanode DatanodeRegistration(127.0.0.1:37139, datanodeUuid=513f750c-11d8-4c92-982d-bbb697c36b28, infoPort=33343, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567) 2024-11-16T05:49:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4323386e0e2279e with lease ID 0xe446bc2bac8ee2da: from storage DS-d1bd78c5-6881-4914-b193-a5d024be40b1 node DatanodeRegistration(127.0.0.1:37139, datanodeUuid=513f750c-11d8-4c92-982d-bbb697c36b28, infoPort=33343, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4323386e0e2279e with lease ID 0xe446bc2bac8ee2da: Processing first storage report for DS-ae6a7b37-7614-4edb-9a22-918e14162082 from datanode DatanodeRegistration(127.0.0.1:37139, datanodeUuid=513f750c-11d8-4c92-982d-bbb697c36b28, infoPort=33343, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567) 2024-11-16T05:49:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4323386e0e2279e with lease ID 0xe446bc2bac8ee2da: from storage DS-ae6a7b37-7614-4edb-9a22-918e14162082 node DatanodeRegistration(127.0.0.1:37139, datanodeUuid=513f750c-11d8-4c92-982d-bbb697c36b28, infoPort=33343, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=235784718;c=1731736142567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:03,103 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079 2024-11-16T05:49:03,108 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/zookeeper_0, clientPort=62607, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:49:03,109 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62607 2024-11-16T05:49:03,109 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:49:03,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:49:03,120 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410 with version=8 2024-11-16T05:49:03,120 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:49:03,122 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:49:03,122 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:49:03,123 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33323 2024-11-16T05:49:03,125 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33323 connecting to ZooKeeper ensemble=127.0.0.1:62607 2024-11-16T05:49:03,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:333230x0, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:49:03,128 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33323-0x1004713e1710000 connected 2024-11-16T05:49:03,144 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,147 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:49:03,147 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410, hbase.cluster.distributed=false 2024-11-16T05:49:03,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:49:03,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33323 2024-11-16T05:49:03,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33323 2024-11-16T05:49:03,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33323 2024-11-16T05:49:03,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33323 2024-11-16T05:49:03,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33323 2024-11-16T05:49:03,166 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:49:03,166 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:49:03,167 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46399 2024-11-16T05:49:03,168 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46399 connecting to ZooKeeper ensemble=127.0.0.1:62607 2024-11-16T05:49:03,169 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,170 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463990x0, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:49:03,173 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46399-0x1004713e1710001 connected 2024-11-16T05:49:03,173 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:49:03,174 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:49:03,174 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:49:03,174 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:49:03,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:49:03,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46399 2024-11-16T05:49:03,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46399 2024-11-16T05:49:03,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46399 2024-11-16T05:49:03,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46399 2024-11-16T05:49:03,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46399 
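The RpcExecutor lines above report the handler and call-queue layout the master and regionserver RPC servers were started with. The sketch below only shows commonly documented configuration keys that influence that layout; it is an assumption that these keys, rather than programmatic defaults inside the test harness, account for the exact counts in the log, and the values are chosen to echo the logged numbers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Total RPC handler threads per server (the executors above run with handlerCount=3).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Fraction of call queues relative to handlers; 0 collapses to a single queue.
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.0f);
        // Split of read vs. write queues for the priority executor.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        return conf;
      }
    }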
2024-11-16T05:49:03,187 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:33323 2024-11-16T05:49:03,187 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:03,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:03,189 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:49:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,190 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:49:03,191 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,33323,1731736143122 from backup master directory 2024-11-16T05:49:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:03,191 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
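The ZKWatcher events above track znodes such as /hbase/master and /hbase/backup-masters during active-master registration. A minimal sketch of the same watch-a-znode pattern using the plain ZooKeeper client (quorum address and session timeout are illustrative; this is not HBase's ZKWatcher implementation):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62607", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            System.out.println("znode created: " + event.getPath());
          }
        });
        connected.await();
        // Registers the default watcher on /hbase/master even if it does not exist yet,
        // mirroring the "Set watcher on znode that does not yet exist" lines above.
        zk.exists("/hbase/master", true);
        Thread.sleep(5_000);  // wait for events in this toy example
        zk.close();
      }
    }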
2024-11-16T05:49:03,191 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,195 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/hbase.id] with ID: c007d917-fc6b-48dc-b131-afc26c1e4531 2024-11-16T05:49:03,195 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/.tmp/hbase.id 2024-11-16T05:49:03,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:49:03,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:49:03,201 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/.tmp/hbase.id]:[hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/hbase.id] 2024-11-16T05:49:03,213 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:03,213 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T05:49:03,215 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
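The cluster ID file above is written to a temporary location and then moved to its target, so a reader never observes a half-written hbase.id. A sketch of that write-then-rename pattern with the Hadoop FileSystem API, under the assumption that a plain create plus rename is a fair stand-in for what FSUtils does here (paths and the helper name are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      // Writes content to <dir>/.tmp/<name> and renames it into place, so readers
      // never see a partially written file.
      public static void writeAtomically(FileSystem fs, Path dir, String name, String content)
          throws Exception {
        Path tmp = new Path(new Path(dir, ".tmp"), name);
        Path dst = new Path(dir, name);
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename " + tmp + " -> " + dst + " failed");
        }
      }
    }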
2024-11-16T05:49:03,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:49:03,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:49:03,226 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:49:03,227 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:49:03,227 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:49:03,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:49:03,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:49:03,235 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store 2024-11-16T05:49:03,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:49:03,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:49:03,242 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:03,242 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:49:03,242 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:03,242 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:03,242 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:49:03,242 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:03,242 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
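The family attributes printed for master:store above (versions, data block encoding, bloom filter, in-memory flag, block size) are the same knobs exposed through the client descriptor builders. A minimal sketch using that public API; the table name below is illustrative, since master:store itself is internal and never created by user code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the attributes logged for the 'info' family: 3 versions, ROW_INDEX_V1
        // encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store_like"))  // illustrative name
            .setColumnFamily(info)
            .build();
      }
    }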
2024-11-16T05:49:03,243 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736143242Disabling compacts and flushes for region at 1731736143242Disabling writes for close at 1731736143242Writing region close event to WAL at 1731736143242Closed at 1731736143242 2024-11-16T05:49:03,243 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/.initializing 2024-11-16T05:49:03,243 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/WALs/3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,245 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C33323%2C1731736143122, suffix=, logDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/WALs/3456ee6a3164,33323,1731736143122, archiveDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/oldWALs, maxLogs=10 2024-11-16T05:49:03,246 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C33323%2C1731736143122.1731736143246 2024-11-16T05:49:03,251 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/WALs/3456ee6a3164,33323,1731736143122/3456ee6a3164%2C33323%2C1731736143122.1731736143246 2024-11-16T05:49:03,257 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33343:33343),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-16T05:49:03,257 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:49:03,258 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:03,258 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,258 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:03,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:49:03,261 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:49:03,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:03,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:49:03,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:03,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:49:03,267 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:03,267 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,268 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,269 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,270 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,270 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,271 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
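The CompactionConfiguration lines above repeat the same selection parameters for every family: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 and off-peak ratio 5.0. A sketch of the configuration keys that usually back those numbers; it is an assumption that this run took them from defaults rather than explicit settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Values chosen to match the numbers printed by CompactionConfiguration above.
        conf.setInt("hbase.hstore.compaction.min", 3);             // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);            // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);      // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);  // minCompactSize
        return conf;
      }
    }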
2024-11-16T05:49:03,272 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:03,281 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:49:03,282 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792877, jitterRate=0.008196219801902771}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:49:03,283 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731736143258Initializing all the Stores at 1731736143259 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736143259Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736143259Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736143259Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736143259Cleaning up temporary data from old regions at 1731736143270 (+11 ms)Region opened successfully at 1731736143282 (+12 ms) 2024-11-16T05:49:03,283 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:49:03,288 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4475bd33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:49:03,289 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-16T05:49:03,289 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:49:03,289 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:49:03,289 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:49:03,290 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T05:49:03,290 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T05:49:03,290 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:49:03,292 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:49:03,293 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:49:03,295 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:49:03,296 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:49:03,296 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:49:03,298 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:49:03,298 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:49:03,299 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:49:03,300 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:49:03,300 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:49:03,302 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:49:03,304 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:49:03,307 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:49:03,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:49:03,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:49:03,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,310 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,33323,1731736143122, sessionid=0x1004713e1710000, setting cluster-up flag (Was=false) 2024-11-16T05:49:03,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,321 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:49:03,322 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,335 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T05:49:03,336 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,33323,1731736143122 2024-11-16T05:49:03,337 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:49:03,339 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:03,340 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:49:03,340 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T05:49:03,340 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,33323,1731736143122 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:49:03,341 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:03,341 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:03,341 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:03,342 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:03,342 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:49:03,342 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,342 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:49:03,342 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731736173343 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,343 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:03,343 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:49:03,344 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:49:03,344 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736143344,5,FailOnTimeoutGroup] 2024-11-16T05:49:03,344 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736143344,5,FailOnTimeoutGroup] 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:49:03,344 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
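LogsCleaner, HFileCleaner and ReplicationBarrierCleaner above are ScheduledChore instances run by a ChoreService. A minimal sketch of a custom chore on the same classes, assuming the (name, stopper, period) constructor; the chore body here is a stand-in for the real cleanup work:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // A trivial chore; real cleaners (LogsCleaner, HFileCleaner) do filesystem work here.
      static class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper, int periodMs) {
          super("HeartbeatChore", stopper, periodMs);
        }
        @Override
        protected void chore() {
          System.out.println("chore tick at " + System.currentTimeMillis());
        }
      }

      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        service.scheduleChore(new HeartbeatChore(stopper, 1000));  // run every second
        Thread.sleep(3_500);
        stopper.stop("done");
        service.shutdown();
      }
    }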
2024-11-16T05:49:03,345 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,345 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,345 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:49:03,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:49:03,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:49:03,352 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:49:03,353 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410 2024-11-16T05:49:03,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:49:03,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:49:03,360 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:03,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:49:03,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:49:03,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:49:03,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:49:03,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:49:03,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:49:03,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:49:03,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:49:03,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:49:03,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740 2024-11-16T05:49:03,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740 2024-11-16T05:49:03,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:49:03,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:49:03,375 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:49:03,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:49:03,377 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:49:03,378 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711531, jitterRate=-0.09524159133434296}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:49:03,378 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(746): ClusterId : c007d917-fc6b-48dc-b131-afc26c1e4531 2024-11-16T05:49:03,378 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:49:03,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731736143360Initializing all the Stores at 1731736143361 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736143361Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1731736143362 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736143362Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736143362Cleaning up temporary data from old regions at 1731736143374 (+12 ms)Region opened successfully at 1731736143378 (+4 ms) 2024-11-16T05:49:03,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:49:03,378 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:49:03,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:49:03,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:49:03,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:49:03,379 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:49:03,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736143378Disabling compacts and flushes for region at 1731736143378Disabling writes for close at 1731736143379 (+1 ms)Writing region close event to WAL at 1731736143379Closed at 1731736143379 2024-11-16T05:49:03,380 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:03,380 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T05:49:03,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:49:03,381 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:49:03,381 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:49:03,381 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:49:03,382 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:49:03,384 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
online-snapshot initialized 2024-11-16T05:49:03,384 DEBUG [RS:0;3456ee6a3164:46399 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a24dde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:49:03,397 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:46399 2024-11-16T05:49:03,397 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:49:03,397 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:49:03,397 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T05:49:03,398 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,33323,1731736143122 with port=46399, startcode=1731736143165 2024-11-16T05:49:03,398 DEBUG [RS:0;3456ee6a3164:46399 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:49:03,400 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35731, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:49:03,400 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33323 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,400 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33323 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,402 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410 2024-11-16T05:49:03,402 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46665 2024-11-16T05:49:03,402 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:49:03,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:49:03,405 DEBUG [RS:0;3456ee6a3164:46399 {}] zookeeper.ZKUtil(111): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,405 WARN [RS:0;3456ee6a3164:46399 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
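[Editor's note] At this point the region server has reported for duty and the master has registered it under /hbase/rs. As a client-side cross-check, the sketch below connects to the same ZooKeeper quorum (127.0.0.1:62607 in this run; the values change on every mini-cluster start) and lists the live region servers through the standard Admin API.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServers {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values copied from this test run's log; they differ per run.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62607");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Should print 3456ee6a3164,46399,1731736143165 for the server registered above.
      for (ServerName sn : admin.getRegionServers()) {
        System.out.println("live region server: " + sn);
      }
    }
  }
}
```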
2024-11-16T05:49:03,405 INFO [RS:0;3456ee6a3164:46399 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:49:03,405 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,405 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,46399,1731736143165] 2024-11-16T05:49:03,408 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:49:03,409 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:49:03,410 INFO [RS:0;3456ee6a3164:46399 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:49:03,410 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,410 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:49:03,411 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:49:03,411 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:49:03,411 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,412 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,412 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:03,412 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:49:03,412 DEBUG [RS:0;3456ee6a3164:46399 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:49:03,412 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,412 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,412 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,412 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,412 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,412 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,46399,1731736143165-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:49:03,427 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:49:03,427 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,46399,1731736143165-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,427 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:03,427 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.Replication(171): 3456ee6a3164,46399,1731736143165 started 2024-11-16T05:49:03,438 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
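[Editor's note] The region server above sizes its global memstore (880 M limit, 836 M low-water mark) and bounds its PressureAwareCompactionThroughputController between 50 and 100 MB/second. A sketch of the configuration knobs that usually drive those numbers follows; the key names are assumptions from memory, and the fractions shown are illustrative rather than the exact values this test used.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Global memstore limit as a fraction of heap, plus the lower mark as a
    // fraction of that limit (880 M * 0.95 = 836 M, consistent with the log above).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

    // Assumed keys for PressureAwareCompactionThroughputController bounds,
    // matching the 100 MB/s upper and 50 MB/s lower bounds logged above.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
  }
}
```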
2024-11-16T05:49:03,438 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,46399,1731736143165, RpcServer on 3456ee6a3164/172.17.0.2:46399, sessionid=0x1004713e1710001 2024-11-16T05:49:03,438 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:49:03,438 DEBUG [RS:0;3456ee6a3164:46399 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,438 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,46399,1731736143165' 2024-11-16T05:49:03,438 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:49:03,439 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:49:03,439 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:49:03,439 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:49:03,439 DEBUG [RS:0;3456ee6a3164:46399 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,439 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,46399,1731736143165' 2024-11-16T05:49:03,439 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:49:03,440 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:49:03,440 DEBUG [RS:0;3456ee6a3164:46399 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:49:03,440 INFO [RS:0;3456ee6a3164:46399 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:49:03,440 INFO [RS:0;3456ee6a3164:46399 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T05:49:03,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:03,532 WARN [3456ee6a3164:33323 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
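[Editor's note] The WARN and stack trace above come from RecoverLeaseFSUtils reflectively calling isFileClosed on a WAL that belongs to an earlier mini-cluster in the same run (note the different namenode port 36821 and server start time); the "Filesystem closed" cause indicates that cluster's DFS client had already been shut down, so this appears to be leftover noise rather than a failure of the current startup. For reference, a hedged sketch of the lease-recovery pattern the utility wraps, using the public DistributedFileSystem API; the WAL path below is hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46665"); // namenode of the current run
    Path wal = new Path("/user/jenkins/example-wal");   // hypothetical WAL path

    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the namenode to recover the lease, then poll until the file is closed.
      boolean recovered = dfs.recoverLease(wal);
      while (!recovered && !dfs.isFileClosed(wal)) {
        Thread.sleep(1000);
        recovered = dfs.recoverLease(wal);
      }
      System.out.println("file closed: " + dfs.isFileClosed(wal));
    }
    fs.close();
  }
}
```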
2024-11-16T05:49:03,543 INFO [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C46399%2C1731736143165, suffix=, logDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165, archiveDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/oldWALs, maxLogs=32 2024-11-16T05:49:03,544 INFO [RS:0;3456ee6a3164:46399 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46399%2C1731736143165.1731736143544 2024-11-16T05:49:03,553 INFO [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736143544 2024-11-16T05:49:03,554 DEBUG [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33343:33343),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-16T05:49:03,783 DEBUG [3456ee6a3164:33323 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:49:03,784 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,787 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,46399,1731736143165, state=OPENING 2024-11-16T05:49:03,789 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:49:03,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:03,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:03,792 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:49:03,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:03,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,46399,1731736143165}] 2024-11-16T05:49:03,948 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:49:03,951 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49825, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:49:03,958 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:49:03,958 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:49:03,961 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C46399%2C1731736143165.meta, suffix=.meta, logDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165, archiveDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/oldWALs, maxLogs=32 2024-11-16T05:49:03,962 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46399%2C1731736143165.meta.1731736143962.meta 2024-11-16T05:49:03,968 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.meta.1731736143962.meta 2024-11-16T05:49:03,973 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41693:41693),(127.0.0.1/127.0.0.1:33343:33343)] 2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:49:03,976 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
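[Editor's note] Both the region server WAL and the meta WAL above are created through FSHLogProvider with blocksize=256 MB and rollsize=128 MB, and the meta region loads MultiRowMutationEndpoint from its table descriptor. The sketch below shows the configuration that typically selects the WAL provider and produces that roll size; the key names are recalled from HBase defaults and should be verified.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // "filesystem" selects FSHLogProvider (the provider instantiated above);
    // "asyncfs" would select AsyncFSWALProvider instead.
    conf.set("hbase.wal.provider", "filesystem");

    // Assumed keys: WAL block size and the multiplier that yields the roll size,
    // i.e. 256 MB * 0.5 = 128 MB as logged above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
  }
}
```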
2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:49:03,976 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:49:03,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:49:03,979 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:49:03,979 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:49:03,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:49:03,980 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:49:03,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:49:03,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:03,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:49:03,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:49:03,982 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:03,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
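[Editor's note] The StoreOpener records above echo the per-family settings of hbase:meta (ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY, 8 KB blocks, block cache on). A hedged sketch of how the same attributes are expressed through the public ColumnFamilyDescriptorBuilder API when defining a family; it reproduces the logged values for illustration and is not how the meta descriptor is built internally.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family attributes printed in the store-open records above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)                                // 8192 B (8KB)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlockCacheEnabled(true)
        .build();
    System.out.println(info);
  }
}
```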
2024-11-16T05:49:03,983 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:49:03,983 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740 2024-11-16T05:49:03,984 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740 2024-11-16T05:49:03,986 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:49:03,986 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:49:03,986 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:49:03,988 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:49:03,989 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843712, jitterRate=0.07283537089824677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:49:03,989 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:49:03,990 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731736143976Writing region info on filesystem at 1731736143976Initializing all the Stores at 1731736143977 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736143977Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736143978 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736143978Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736143978Cleaning up temporary data from old regions at 1731736143986 (+8 ms)Running coprocessor post-open hooks at 1731736143989 (+3 ms)Region opened successfully at 1731736143990 (+1 ms) 2024-11-16T05:49:03,991 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731736143947 2024-11-16T05:49:03,993 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:49:03,994 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:49:03,994 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,995 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,46399,1731736143165, state=OPEN 2024-11-16T05:49:03,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:49:03,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:49:03,999 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,46399,1731736143165 2024-11-16T05:49:03,999 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:03,999 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:04,002 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:49:04,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,46399,1731736143165 in 207 msec 2024-11-16T05:49:04,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:49:04,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 623 msec 2024-11-16T05:49:04,006 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:04,006 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:49:04,007 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:49:04,007 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,46399,1731736143165, seqNum=-1] 2024-11-16T05:49:04,008 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:49:04,009 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41373, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:49:04,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 674 msec 2024-11-16T05:49:04,014 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731736144014, completionTime=-1 2024-11-16T05:49:04,014 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:49:04,014 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:49:04,016 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:49:04,016 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736204016 2024-11-16T05:49:04,016 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736264016 2024-11-16T05:49:04,016 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T05:49:04,016 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,33323,1731736143122-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:04,017 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,33323,1731736143122-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:04,017 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,33323,1731736143122-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:04,017 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:33323, period=300000, unit=MILLISECONDS is enabled. 
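[Editor's note] With hbase:meta open on the region server and InitMetaProcedure about to create the default and hbase namespaces, a client can verify the state the master has just reached. The short sketch below lists the namespaces and reads the meta region location, mirroring the ConnectionUtils records in this log; the quorum details are again specific to this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class VerifyMetaAndNamespaces {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62607"); // from this run's log

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         RegionLocator metaLocator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Expect "default" and "hbase", created by InitMetaProcedure.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // Expect 3456ee6a3164,46399,1731736143165 as the meta location.
      System.out.println("meta at: "
          + metaLocator.getRegionLocation(new byte[0]).getServerName());
    }
  }
}
```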
2024-11-16T05:49:04,017 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:04,017 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:04,018 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.829sec 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,33323,1731736143122-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:49:04,020 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,33323,1731736143122-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:49:04,023 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:49:04,023 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:49:04,023 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,33323,1731736143122-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
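[Editor's note] "Master has completed initialization 0.829sec" and the chore registrations above mark the end of the startup this log traces; the next records show HBaseTestingUtil declaring the mini-cluster up and the test client connecting to it. Below is a heavily hedged sketch of the test-harness pattern behind those lines, assuming HBaseTestingUtil in this 4.0.0-alpha-1-SNAPSHOT build keeps the start/shutdown methods of the long-standing HBaseTestingUtility API; verify against the actual class before relying on it.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: startMiniCluster/shutdownMiniCluster exist on HBaseTestingUtil
    // as they did on HBaseTestingUtility.
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster(1);       // one master + one region server, as in this log
    try {
      System.out.println("mini cluster is up");
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```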
2024-11-16T05:49:04,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3349f92e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:49:04,079 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,33323,-1 for getting cluster id 2024-11-16T05:49:04,079 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:49:04,080 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c007d917-fc6b-48dc-b131-afc26c1e4531' 2024-11-16T05:49:04,081 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:49:04,081 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c007d917-fc6b-48dc-b131-afc26c1e4531" 2024-11-16T05:49:04,081 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa6cac7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:49:04,081 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,33323,-1] 2024-11-16T05:49:04,081 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T05:49:04,081 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:04,082 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45608, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:49:04,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cc67b7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:49:04,084 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:49:04,085 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,46399,1731736143165, seqNum=-1] 2024-11-16T05:49:04,085 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:49:04,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:49:04,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,33323,1731736143122 2024-11-16T05:49:04,088 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:04,090 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:49:04,090 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T05:49:04,091 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,33323,1731736143122 2024-11-16T05:49:04,091 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3fcf6cde 2024-11-16T05:49:04,091 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T05:49:04,092 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T05:49:04,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T05:49:04,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T05:49:04,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:49:04,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:04,096 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T05:49:04,096 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:04,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-16T05:49:04,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:49:04,097 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T05:49:04,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741835_1011 (size=405) 2024-11-16T05:49:04,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741835_1011 (size=405) 2024-11-16T05:49:04,105 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 28e0c5f3af5e9ad9c5bdc72c68305956, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410 2024-11-16T05:49:04,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741836_1012 (size=88) 2024-11-16T05:49:04,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741836_1012 (size=88) 2024-11-16T05:49:04,112 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:04,112 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 28e0c5f3af5e9ad9c5bdc72c68305956, disabling compactions & flushes 2024-11-16T05:49:04,112 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:04,112 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:04,112 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. after waiting 0 ms 2024-11-16T05:49:04,113 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
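For context on the create-table request logged above: the descriptor values in the log (MAX_FILESIZE=786432, MEMSTORE_FLUSHSIZE=8192, a single 'info' family with VERSIONS=1) can be produced from client code along the following lines. This is a minimal sketch of what the test harness presumably issues, not the test's actual source; the connection setup is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
              // Deliberately tiny limits; values from the log. These are what trigger the
              // TableDescriptorChecker warnings about MAX_FILESIZE and MEMSTORE_FLUSHSIZE above.
              .setMaxFileSize(786432L)
              .setMemStoreFlushSize(8192L)
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .build())
              .build();
          // Issues the CreateTableProcedure tracked as pid=4 in the log above.
          admin.createTable(td);
        }
      }
    }

After this call the master walks the procedure states seen in the log (CREATE_TABLE_PRE_OPERATION through CREATE_TABLE_POST_OPERATION) and assigns the single region to a regionserver.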
2024-11-16T05:49:04,113 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:04,113 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 28e0c5f3af5e9ad9c5bdc72c68305956: Waiting for close lock at 1731736144112Disabling compacts and flushes for region at 1731736144112Disabling writes for close at 1731736144112Writing region close event to WAL at 1731736144113 (+1 ms)Closed at 1731736144113 2024-11-16T05:49:04,114 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T05:49:04,114 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731736144114"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731736144114"}]},"ts":"1731736144114"} 2024-11-16T05:49:04,116 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T05:49:04,118 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T05:49:04,118 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736144118"}]},"ts":"1731736144118"} 2024-11-16T05:49:04,120 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-16T05:49:04,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=28e0c5f3af5e9ad9c5bdc72c68305956, ASSIGN}] 2024-11-16T05:49:04,122 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=28e0c5f3af5e9ad9c5bdc72c68305956, ASSIGN 2024-11-16T05:49:04,123 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=28e0c5f3af5e9ad9c5bdc72c68305956, ASSIGN; state=OFFLINE, location=3456ee6a3164,46399,1731736143165; forceNewPlan=false, retain=false 2024-11-16T05:49:04,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:04,273 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=28e0c5f3af5e9ad9c5bdc72c68305956, regionState=OPENING, regionLocation=3456ee6a3164,46399,1731736143165 2024-11-16T05:49:04,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=28e0c5f3af5e9ad9c5bdc72c68305956, ASSIGN because future has completed 2024-11-16T05:49:04,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28e0c5f3af5e9ad9c5bdc72c68305956, server=3456ee6a3164,46399,1731736143165}] 2024-11-16T05:49:04,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T05:49:04,441 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
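The once-per-second "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" WARNs above come from WAL lease recovery retrying against a DFSClient that an earlier test has already shut down. As a rough illustration of the recover-then-poll pattern against HDFS (a simplified sketch; the real RecoverLeaseFSUtils adds timeouts, reflection for older Hadoop versions, and more careful backoff), the loop looks roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      /** Ask the NameNode to recover the lease on a WAL file, then poll until it is closed. */
      static void recoverLease(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // nothing to recover on local or non-HDFS filesystems
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal);       // kick off lease recovery on the NameNode
        while (!recovered && !dfs.isFileClosed(wal)) {   // isFileClosed() is the call failing above
          Thread.sleep(1000);                            // retry roughly once per second
          recovered = dfs.recoverLease(wal);
        }
      }
    }

In this log the isFileClosed() probe itself throws "Filesystem closed", so the retry loop keeps firing and re-logging the same stack trace until the Close-WAL-Writer thread gives up or is interrupted.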
2024-11-16T05:49:04,442 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 28e0c5f3af5e9ad9c5bdc72c68305956, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:49:04,442 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,442 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:04,442 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,442 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,444 INFO [StoreOpener-28e0c5f3af5e9ad9c5bdc72c68305956-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,446 INFO [StoreOpener-28e0c5f3af5e9ad9c5bdc72c68305956-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28e0c5f3af5e9ad9c5bdc72c68305956 columnFamilyName info 2024-11-16T05:49:04,446 DEBUG [StoreOpener-28e0c5f3af5e9ad9c5bdc72c68305956-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:04,447 INFO [StoreOpener-28e0c5f3af5e9ad9c5bdc72c68305956-1 {}] regionserver.HStore(327): Store=28e0c5f3af5e9ad9c5bdc72c68305956/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:04,447 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,448 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,449 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,449 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,450 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,452 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,455 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:49:04,456 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 28e0c5f3af5e9ad9c5bdc72c68305956; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717360, jitterRate=-0.08783063292503357}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:49:04,456 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:04,457 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 28e0c5f3af5e9ad9c5bdc72c68305956: Running coprocessor pre-open hook at 1731736144442Writing region info on filesystem at 1731736144442Initializing all the Stores at 1731736144444 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736144444Cleaning up temporary data from old regions at 1731736144450 (+6 ms)Running coprocessor post-open hooks at 1731736144456 (+6 ms)Region opened successfully at 1731736144457 (+1 ms) 2024-11-16T05:49:04,459 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956., pid=6, masterSystemTime=1731736144431 2024-11-16T05:49:04,462 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:04,462 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:04,464 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=28e0c5f3af5e9ad9c5bdc72c68305956, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,46399,1731736143165 2024-11-16T05:49:04,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28e0c5f3af5e9ad9c5bdc72c68305956, server=3456ee6a3164,46399,1731736143165 because future has completed 2024-11-16T05:49:04,473 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T05:49:04,473 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 28e0c5f3af5e9ad9c5bdc72c68305956, server=3456ee6a3164,46399,1731736143165 in 192 msec 2024-11-16T05:49:04,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T05:49:04,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=28e0c5f3af5e9ad9c5bdc72c68305956, ASSIGN in 353 msec 2024-11-16T05:49:04,476 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T05:49:04,477 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736144476"}]},"ts":"1731736144476"} 2024-11-16T05:49:04,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:04,478 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-16T05:49:04,479 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T05:49:04,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 387 msec 2024-11-16T05:49:05,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:05,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:06,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:06,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:07,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:07,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:08,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:08,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:09,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:09,481 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:49:09,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:09,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:09,521 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T05:49:09,521 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-16T05:49:10,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:10,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:11,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:11,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:12,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:12,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:13,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:13,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:14,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:49:14,110 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T05:49:14,110 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-16T05:49:14,116 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:14,116 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
2024-11-16T05:49:14,122 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956., hostname=3456ee6a3164,46399,1731736143165, seqNum=2] 2024-11-16T05:49:14,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:14,136 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T05:49:14,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T05:49:14,137 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T05:49:14,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T05:49:14,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:14,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-16T05:49:14,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:14,304 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 28e0c5f3af5e9ad9c5bdc72c68305956 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T05:49:14,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:14,304 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T05:49:14,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/3ec43f49fb534a7d9b852f57b4c23794 is 1080, key is row0001/info:/1731736154125/Put/seqid=0 2024-11-16T05:49:14,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741837_1013 (size=6033) 2024-11-16T05:49:14,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741837_1013 (size=6033) 2024-11-16T05:49:14,324 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/3ec43f49fb534a7d9b852f57b4c23794 2024-11-16T05:49:14,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/3ec43f49fb534a7d9b852f57b4c23794 as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/3ec43f49fb534a7d9b852f57b4c23794 2024-11-16T05:49:14,337 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/3ec43f49fb534a7d9b852f57b4c23794, entries=1, sequenceid=5, filesize=5.9 K 2024-11-16T05:49:14,338 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 28e0c5f3af5e9ad9c5bdc72c68305956 in 35ms, sequenceid=5, compaction requested=false 2024-11-16T05:49:14,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 28e0c5f3af5e9ad9c5bdc72c68305956: 2024-11-16T05:49:14,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:14,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-16T05:49:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-16T05:49:14,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T05:49:14,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-11-16T05:49:14,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 214 msec 2024-11-16T05:49:14,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:15,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:15,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:16,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:16,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:17,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:17,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:18,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:18,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:19,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:19,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:20,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:20,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:21,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:21,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:22,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:22,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:23,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:23,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:24,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T05:49:24,151 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T05:49:24,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:49:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-16T05:49:24,161 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T05:49:24,162 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T05:49:24,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T05:49:24,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:24,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-16T05:49:24,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
2024-11-16T05:49:24,315 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 28e0c5f3af5e9ad9c5bdc72c68305956 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T05:49:24,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/c17781a2c62c438c8520e84a88aca56d is 1080, key is row0002/info:/1731736164153/Put/seqid=0 2024-11-16T05:49:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741838_1014 (size=6033) 2024-11-16T05:49:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741838_1014 (size=6033) 2024-11-16T05:49:24,325 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/c17781a2c62c438c8520e84a88aca56d 2024-11-16T05:49:24,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/c17781a2c62c438c8520e84a88aca56d as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/c17781a2c62c438c8520e84a88aca56d 2024-11-16T05:49:24,337 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/c17781a2c62c438c8520e84a88aca56d, entries=1, sequenceid=9, filesize=5.9 K 2024-11-16T05:49:24,338 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 28e0c5f3af5e9ad9c5bdc72c68305956 in 23ms, sequenceid=9, compaction requested=false 2024-11-16T05:49:24,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 28e0c5f3af5e9ad9c5bdc72c68305956: 2024-11-16T05:49:24,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
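[Editor's note] The entries above trace a client-requested flush end to end: the FLUSH operation on TestLogRolling-testCompactionRecordDoesntBlockRolling is accepted by the master, FlushTableProcedure pid=9 fans out to FlushRegionProcedure pid=10, and the region server writes the memstore out under .tmp and commits the new store file into the info family. A minimal client-side sketch of issuing such a flush is shown below; it assumes an hbase-site.xml on the classpath pointing at a running cluster and an existing table of that name, and is only an illustration, not code taken from this test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws IOException {
    // Assumes hbase-site.xml on the classpath points at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Synchronous flush request; as the log above shows, the master schedules a
      // FlushTableProcedure and the region server flushes the memstore to a store file.
      admin.flush(table);
    }
  }
}
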
2024-11-16T05:49:24,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-16T05:49:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-16T05:49:24,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-16T05:49:24,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-16T05:49:24,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-11-16T05:49:24,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:25,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:25,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:26,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:26,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:27,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:27,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:28,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:28,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:29,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:29,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 after 68079ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T05:49:29,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:29,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta after 68074ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
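[Editor's note] The Close-WAL-Writer warnings above follow a recover-then-poll pattern: a lease recovery is requested on the WAL file, then the file is repeatedly checked with isFileClosed, and each attempt here fails with "Filesystem closed" because the DFSClient has already been shut down, so attempt=2 is only reached after roughly 68 seconds of retries. The sketch below illustrates that pattern against the public DistributedFileSystem API; the timeout and poll interval are made-up values and this is a simplified stand-in, not the RecoverLeaseFSUtils implementation.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /**
   * Ask HDFS to recover the lease on a file and wait until it is closed.
   * Budget and poll interval are illustrative, not HBase defaults.
   */
  public static boolean recoverAndWait(DistributedFileSystem dfs, Path file)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + 60_000L; // illustrative 60s budget
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease returns true when the file is already closed on the NameNode;
        // otherwise poll isFileClosed until the last block is finalized.
        if (dfs.recoverLease(file) || dfs.isFileClosed(file)) {
          return true;
        }
      } catch (IOException e) {
        // A closed DFSClient surfaces here as "Filesystem closed"; the log above
        // shows such failures being warned about and retried until the budget runs out.
        System.err.println("lease recovery attempt failed for " + file + ": " + e);
      }
      Thread.sleep(1_000L); // illustrative poll interval between attempts
    }
    return false;
  }
}
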
2024-11-16T05:49:30,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:30,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:31,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-16T05:49:31,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null
2024-11-16T05:49:32,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null
2024-11-16T05:49:32,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null
2024-11-16T05:49:33,103 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-16T05:49:33,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null
2024-11-16T05:49:33,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null
2024-11-16T05:49:34,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T05:49:34,259 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T05:49:34,263 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46399%2C1731736143165.1731736174263
2024-11-16T05:49:34,272 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:34,273 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:34,273 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:34,273 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:34,273 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:34,273 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736143544 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736174263
2024-11-16T05:49:34,275 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41693:41693),(127.0.0.1/127.0.0.1:33343:33343)]
2024-11-16T05:49:34,275 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736143544 is not closed yet, will try archiving it next time
2024-11-16T05:49:34,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741833_1009 (size=5546)
2024-11-16T05:49:34,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T05:49:34,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741833_1009 (size=5546)
2024-11-16T05:49:34,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T05:49:34,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-16T05:49:34,278 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T05:49:34,279 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T05:49:34,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T05:49:34,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null
2024-11-16T05:49:34,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-16T05:49:34,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.
2024-11-16T05:49:34,434 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 28e0c5f3af5e9ad9c5bdc72c68305956 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T05:49:34,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/22bb1a95b1eb43b098ba10f3ddc72d52 is 1080, key is row0003/info:/1731736174260/Put/seqid=0
2024-11-16T05:49:34,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741840_1016 (size=6033)
2024-11-16T05:49:34,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741840_1016 (size=6033)
2024-11-16T05:49:34,445 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/22bb1a95b1eb43b098ba10f3ddc72d52
2024-11-16T05:49:34,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/22bb1a95b1eb43b098ba10f3ddc72d52 as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/22bb1a95b1eb43b098ba10f3ddc72d52
2024-11-16T05:49:34,460 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/22bb1a95b1eb43b098ba10f3ddc72d52, entries=1, sequenceid=13, filesize=5.9 K
2024-11-16T05:49:34,462 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 28e0c5f3af5e9ad9c5bdc72c68305956 in 28ms, sequenceid=13, compaction requested=true
2024-11-16T05:49:34,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 28e0c5f3af5e9ad9c5bdc72c68305956:
2024-11-16T05:49:34,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.
2024-11-16T05:49:34,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-16T05:49:34,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-16T05:49:34,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-16T05:49:34,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-11-16T05:49:34,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec
2024-11-16T05:49:34,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:35,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:35,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:36,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:36,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:37,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:37,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:38,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:38,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:39,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:39,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:40,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:40,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:41,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T05:49:41,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
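The WARN above (and its later repeats, roughly once per second per WAL) is the Close-WAL-Writer-0 thread still polling HDFS lease recovery for an old WAL after the test's shared DFSClient has been shut down, so every reflective isFileClosed probe fails with "Filesystem closed". The following is a minimal standalone sketch of the same polling pattern against the plain DistributedFileSystem API; it is not the HBase RecoverLeaseFSUtils code itself, and the path, timeout, and 1-second retry interval are illustrative values chosen to match the cadence of this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryProbe {
  // Illustrative only: polls recoverLease/isFileClosed the way the
  // RecoverLeaseFSUtils frames in the trace above do via reflection.
  public static boolean waitForLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the NameNode has closed the file.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      // isFileClosed() throws IOException("Filesystem closed") if the
      // DFSClient behind this FileSystem instance was already closed,
      // which is exactly the cause chain logged above.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(1000L); // the log shows ~1s between retries
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path; a real test would take it from the mini-cluster.
    Path wal = new Path("hdfs://localhost:36821/user/jenkins/test-data/example-wal-dir/example.wal");
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      boolean closed = waitForLease((DistributedFileSystem) fs, wal, 60_000L);
      System.out.println("lease recovered/closed: " + closed);
    }
  }
}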
2024-11-16T05:49:44,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-16T05:49:44,369 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T05:49:44,369 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-16T05:49:44,372 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-16T05:49:44,372 DEBUG [Time-limited test {}] regionserver.HStore(1541): 28e0c5f3af5e9ad9c5bdc72c68305956/info is initiating minor compaction (all files)
2024-11-16T05:49:44,372 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-16T05:49:44,372 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-16T05:49:44,373 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 28e0c5f3af5e9ad9c5bdc72c68305956/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.
2024-11-16T05:49:44,373 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/3ec43f49fb534a7d9b852f57b4c23794, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/c17781a2c62c438c8520e84a88aca56d, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/22bb1a95b1eb43b098ba10f3ddc72d52] into tmpdir=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp, totalSize=17.7 K
2024-11-16T05:49:44,374 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3ec43f49fb534a7d9b852f57b4c23794, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731736154125
2024-11-16T05:49:44,375 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c17781a2c62c438c8520e84a88aca56d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731736164153
2024-11-16T05:49:44,375 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 22bb1a95b1eb43b098ba10f3ddc72d52, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731736174260
2024-11-16T05:49:44,376 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-16T05:49:44,377 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-16T05:49:44,390 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 28e0c5f3af5e9ad9c5bdc72c68305956#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-16T05:49:44,390 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/43cdefb9d8a947389d39c5b1bd603f7c is 1080, key is row0001/info:/1731736154125/Put/seqid=0
2024-11-16T05:49:44,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741841_1017 (size=8296)
2024-11-16T05:49:44,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741841_1017 (size=8296)
2024-11-16T05:49:44,403 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/43cdefb9d8a947389d39c5b1bd603f7c as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/43cdefb9d8a947389d39c5b1bd603f7c
2024-11-16T05:49:44,409 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 28e0c5f3af5e9ad9c5bdc72c68305956/info of 28e0c5f3af5e9ad9c5bdc72c68305956 into 43cdefb9d8a947389d39c5b1bd603f7c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
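The "Exploring compaction algorithm has selected 3 files of size 18099 ... with 1 in ratio" record above comes down to a size-ratio test over the candidate store files. The sketch below is a standalone illustration of that check, not the actual ExploringCompactionPolicy implementation; the per-file sizes are assumed to be three equal 6033-byte flush files (3 x 6033 = 18099, matching this log), and 1.2 is the stock hbase.hstore.compaction.ratio default.

import java.util.List;

public class RatioCheckSketch {
  // Illustrative size-ratio test: a candidate set is "in ratio" if every file
  // is no larger than ratio * (sum of the other files in the set).
  static boolean inRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Assumed equal split of the 18099-byte total seen in the log.
    List<Long> candidate = List.of(6033L, 6033L, 6033L);
    System.out.println(inRatio(candidate, 1.2)); // prints true: the set is compactable
  }
}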
2024-11-16T05:49:44,409 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 28e0c5f3af5e9ad9c5bdc72c68305956:
2024-11-16T05:49:44,412 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46399%2C1731736143165.1731736184412
2024-11-16T05:49:44,418 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:44,418 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:44,418 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:44,418 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:44,418 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T05:49:44,418 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736174263 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736184412
2024-11-16T05:49:44,419 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33343:33343),(127.0.0.1/127.0.0.1:41693:41693)]
2024-11-16T05:49:44,419 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736174263 is not closed yet, will try archiving it next time
2024-11-16T05:49:44,420 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736143544 to hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/oldWALs/3456ee6a3164%2C46399%2C1731736143165.1731736143544
2024-11-16T05:49:44,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741839_1015 (size=2520)
2024-11-16T05:49:44,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741839_1015 (size=2520)
2024-11-16T05:49:44,420 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T05:49:44,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T05:49:44,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-16T05:49:44,423 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T05:49:44,423 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T05:49:44,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T05:49:44,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T05:49:44,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-16T05:49:44,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.
2024-11-16T05:49:44,577 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 28e0c5f3af5e9ad9c5bdc72c68305956 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T05:49:44,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/fcf970355207462e9a5ac4b18776a391 is 1080, key is row0000/info:/1731736184410/Put/seqid=0
2024-11-16T05:49:44,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741843_1019 (size=6033)
2024-11-16T05:49:44,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741843_1019 (size=6033)
2024-11-16T05:49:44,586 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/fcf970355207462e9a5ac4b18776a391
2024-11-16T05:49:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/fcf970355207462e9a5ac4b18776a391 as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/fcf970355207462e9a5ac4b18776a391
2024-11-16T05:49:44,598 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/fcf970355207462e9a5ac4b18776a391, entries=1, sequenceid=18, filesize=5.9 K
2024-11-16T05:49:44,599 INFO [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 28e0c5f3af5e9ad9c5bdc72c68305956 in 22ms, sequenceid=18, compaction requested=false
2024-11-16T05:49:44,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 28e0c5f3af5e9ad9c5bdc72c68305956:
2024-11-16T05:49:44,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.
2024-11-16T05:49:44,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-16T05:49:44,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-16T05:49:44,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-16T05:49:44,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-16T05:49:44,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-16T05:49:45,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
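The flush procedure (pid=13 with subprocedure pid=14) and the WAL roll recorded above can all be driven from a client through the standard HBase Admin API. The sketch below shows the equivalent client-side calls; it is an illustration rather than the test's own code, the table name and server name are read from the log lines above, and error handling is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushCompactRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush: submits a FlushTableProcedure (like pid=13 above) and waits
      // for the per-region flush (pid=14) to finish.
      admin.flush(table);

      // Compaction: asynchronously requests a compaction of the table's store
      // files; the test in this log drives its compaction in-process, so this
      // is an equivalent client-side trigger, not necessarily what it does.
      admin.compact(table);

      // WAL roll: rolls the region server's WAL, matching the
      // "Rolled WAL ... new WAL ..." record above. Server name values are
      // taken from the WAL path in the log and are illustrative here.
      ServerName rs = ServerName.valueOf("3456ee6a3164", 46399, 1731736143165L);
      admin.rollWALWriter(rs);
    }
  }
}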
2024-11-16T05:49:49,442 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28e0c5f3af5e9ad9c5bdc72c68305956, had cached 0 bytes from a total of 14329
2024-11-16T05:49:54,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:54,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33323 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T05:49:54,519 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T05:49:54,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:54,525 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46399%2C1731736143165.1731736194524 2024-11-16T05:49:54,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,531 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,531 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,531 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,532 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736184412 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736194524 2024-11-16T05:49:54,533 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33343:33343),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-16T05:49:54,533 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736184412 is not closed yet, will try archiving it next time 2024-11-16T05:49:54,533 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/WALs/3456ee6a3164,46399,1731736143165/3456ee6a3164%2C46399%2C1731736143165.1731736174263 to hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/oldWALs/3456ee6a3164%2C46399%2C1731736143165.1731736174263 2024-11-16T05:49:54,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:49:54,533 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T05:49:54,533 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:49:54,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:54,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:54,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:49:54,534 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=441201650, stopped=false 2024-11-16T05:49:54,534 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T05:49:54,534 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,33323,1731736143122 2024-11-16T05:49:54,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741842_1018 (size=2026) 2024-11-16T05:49:54,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:49:54,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:49:54,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:54,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:54,536 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:49:54,536 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:49:54,536 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) 
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:49:54,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:54,536 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,46399,1731736143165' ***** 2024-11-16T05:49:54,536 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:49:54,536 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:49:54,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(3091): Received CLOSE for 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:54,537 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,46399,1731736143165 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:49:54,537 INFO [RS:0;3456ee6a3164:46399 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:46399. 
2024-11-16T05:49:54,537 DEBUG [RS:0;3456ee6a3164:46399 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:49:54,538 DEBUG [RS:0;3456ee6a3164:46399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:54,538 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T05:49:54,538 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:49:54,538 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T05:49:54,538 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:49:54,538 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T05:49:54,538 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 28e0c5f3af5e9ad9c5bdc72c68305956=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.} 2024-11-16T05:49:54,538 DEBUG [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 28e0c5f3af5e9ad9c5bdc72c68305956 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 28e0c5f3af5e9ad9c5bdc72c68305956, disabling compactions & flushes 2024-11-16T05:49:54,538 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:49:54,538 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. after waiting 0 ms 2024-11-16T05:49:54,538 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:54,538 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-16T05:49:54,539 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 28e0c5f3af5e9ad9c5bdc72c68305956 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T05:49:54,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741842_1018 (size=2026) 2024-11-16T05:49:54,546 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/89caa55cb3a84af68eccb52ed73795aa is 1080, key is row0001/info:/1731736194521/Put/seqid=0 2024-11-16T05:49:54,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741845_1021 (size=6033) 2024-11-16T05:49:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741845_1021 (size=6033) 2024-11-16T05:49:54,551 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/89caa55cb3a84af68eccb52ed73795aa 2024-11-16T05:49:54,557 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/.tmp/info/89caa55cb3a84af68eccb52ed73795aa as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/89caa55cb3a84af68eccb52ed73795aa 2024-11-16T05:49:54,559 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/info/63566c39b5bb410e8e9def5cf963913d is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956./info:regioninfo/1731736144463/Put/seqid=0 2024-11-16T05:49:54,564 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/89caa55cb3a84af68eccb52ed73795aa, entries=1, sequenceid=22, filesize=5.9 K 2024-11-16T05:49:54,565 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 28e0c5f3af5e9ad9c5bdc72c68305956 in 27ms, sequenceid=22, compaction requested=true 2024-11-16T05:49:54,565 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/3ec43f49fb534a7d9b852f57b4c23794, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/c17781a2c62c438c8520e84a88aca56d, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/22bb1a95b1eb43b098ba10f3ddc72d52] to archive 2024-11-16T05:49:54,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741846_1022 (size=7308) 2024-11-16T05:49:54,567 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T05:49:54,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741846_1022 (size=7308) 2024-11-16T05:49:54,567 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/info/63566c39b5bb410e8e9def5cf963913d 2024-11-16T05:49:54,568 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/3ec43f49fb534a7d9b852f57b4c23794 to hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/3ec43f49fb534a7d9b852f57b4c23794 2024-11-16T05:49:54,570 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/c17781a2c62c438c8520e84a88aca56d to hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/c17781a2c62c438c8520e84a88aca56d 2024-11-16T05:49:54,571 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/22bb1a95b1eb43b098ba10f3ddc72d52 to hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/info/22bb1a95b1eb43b098ba10f3ddc72d52 2024-11-16T05:49:54,571 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3456ee6a3164:33323 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-16T05:49:54,571 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3ec43f49fb534a7d9b852f57b4c23794=6033, c17781a2c62c438c8520e84a88aca56d=6033, 22bb1a95b1eb43b098ba10f3ddc72d52=6033] 2024-11-16T05:49:54,575 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/28e0c5f3af5e9ad9c5bdc72c68305956/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-16T05:49:54,575 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 2024-11-16T05:49:54,575 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 28e0c5f3af5e9ad9c5bdc72c68305956: Waiting for close lock at 1731736194538Running coprocessor pre-close hooks at 1731736194538Disabling compacts and flushes for region at 1731736194538Disabling writes for close at 1731736194538Obtaining lock to block concurrent updates at 1731736194539 (+1 ms)Preparing flush snapshotting stores in 28e0c5f3af5e9ad9c5bdc72c68305956 at 1731736194539Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731736194539Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. at 1731736194540 (+1 ms)Flushing 28e0c5f3af5e9ad9c5bdc72c68305956/info: creating writer at 1731736194540Flushing 28e0c5f3af5e9ad9c5bdc72c68305956/info: appending metadata at 1731736194546 (+6 ms)Flushing 28e0c5f3af5e9ad9c5bdc72c68305956/info: closing flushed file at 1731736194546Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@679a59a9: reopening flushed file at 1731736194556 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 28e0c5f3af5e9ad9c5bdc72c68305956 in 27ms, sequenceid=22, compaction requested=true at 1731736194565 (+9 ms)Writing region close event to WAL at 1731736194572 (+7 ms)Running coprocessor post-close hooks at 1731736194575 (+3 ms)Closed at 1731736194575 2024-11-16T05:49:54,575 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731736144093.28e0c5f3af5e9ad9c5bdc72c68305956. 
2024-11-16T05:49:54,589 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/ns/c86e6822eba34ce5aa656f5c23666dde is 43, key is default/ns:d/1731736144009/Put/seqid=0 2024-11-16T05:49:54,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741847_1023 (size=5153) 2024-11-16T05:49:54,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741847_1023 (size=5153) 2024-11-16T05:49:54,594 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/ns/c86e6822eba34ce5aa656f5c23666dde 2024-11-16T05:49:54,614 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/table/ff6bb908e77b4ac8a7ac4614de92c63a is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731736144476/Put/seqid=0 2024-11-16T05:49:54,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741848_1024 (size=5508) 2024-11-16T05:49:54,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741848_1024 (size=5508) 2024-11-16T05:49:54,618 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/table/ff6bb908e77b4ac8a7ac4614de92c63a 2024-11-16T05:49:54,624 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/info/63566c39b5bb410e8e9def5cf963913d as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/info/63566c39b5bb410e8e9def5cf963913d 2024-11-16T05:49:54,630 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/info/63566c39b5bb410e8e9def5cf963913d, entries=10, sequenceid=11, filesize=7.1 K 2024-11-16T05:49:54,631 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/ns/c86e6822eba34ce5aa656f5c23666dde as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/ns/c86e6822eba34ce5aa656f5c23666dde 2024-11-16T05:49:54,636 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/ns/c86e6822eba34ce5aa656f5c23666dde, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T05:49:54,637 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/.tmp/table/ff6bb908e77b4ac8a7ac4614de92c63a as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/table/ff6bb908e77b4ac8a7ac4614de92c63a 2024-11-16T05:49:54,642 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/table/ff6bb908e77b4ac8a7ac4614de92c63a, entries=2, sequenceid=11, filesize=5.4 K 2024-11-16T05:49:54,643 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=11, compaction requested=false 2024-11-16T05:49:54,647 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T05:49:54,648 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:49:54,648 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:49:54,648 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736194538Running coprocessor pre-close hooks at 1731736194538Disabling compacts and flushes for region at 1731736194538Disabling writes for close at 1731736194538Obtaining lock to block concurrent updates at 1731736194538Preparing flush snapshotting stores in 1588230740 at 1731736194538Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731736194539 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731736194540 (+1 ms)Flushing 1588230740/info: creating writer at 1731736194540Flushing 1588230740/info: appending metadata at 1731736194558 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731736194558Flushing 1588230740/ns: creating writer at 1731736194572 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731736194589 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731736194589Flushing 1588230740/table: creating writer at 1731736194599 (+10 ms)Flushing 1588230740/table: appending metadata at 1731736194613 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731736194613Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5eafd7fa: reopening flushed file at 1731736194623 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e683554: reopening flushed file at 1731736194630 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31b6c8a: reopening flushed file at 1731736194636 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=11, compaction requested=false at 1731736194643 (+7 ms)Writing region close event to WAL at 1731736194644 (+1 ms)Running coprocessor post-close hooks at 1731736194648 (+4 ms)Closed at 1731736194648 2024-11-16T05:49:54,648 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:49:54,738 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,46399,1731736143165; all regions closed. 2024-11-16T05:49:54,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,740 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,740 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,740 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741834_1010 (size=3306) 2024-11-16T05:49:54,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741834_1010 (size=3306) 2024-11-16T05:49:54,750 DEBUG [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/oldWALs 2024-11-16T05:49:54,750 INFO [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C46399%2C1731736143165.meta:.meta(num 1731736143962) 2024-11-16T05:49:54,751 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,751 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,751 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,751 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741844_1020 (size=1252) 2024-11-16T05:49:54,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741844_1020 (size=1252) 2024-11-16T05:49:54,759 DEBUG [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/oldWALs 2024-11-16T05:49:54,759 INFO [RS:0;3456ee6a3164:46399 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C46399%2C1731736143165:(num 1731736194524) 2024-11-16T05:49:54,759 DEBUG [RS:0;3456ee6a3164:46399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:54,759 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:49:54,760 INFO [RS:0;3456ee6a3164:46399 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:49:54,760 INFO [RS:0;3456ee6a3164:46399 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T05:49:54,760 INFO [RS:0;3456ee6a3164:46399 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:49:54,760 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T05:49:54,760 INFO [RS:0;3456ee6a3164:46399 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46399 2024-11-16T05:49:54,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:49:54,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,46399,1731736143165 2024-11-16T05:49:54,764 INFO [RS:0;3456ee6a3164:46399 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:49:54,765 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,46399,1731736143165] 2024-11-16T05:49:54,767 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,46399,1731736143165 already deleted, retry=false 2024-11-16T05:49:54,767 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,46399,1731736143165 expired; onlineServers=0 2024-11-16T05:49:54,767 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,33323,1731736143122' ***** 2024-11-16T05:49:54,767 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:49:54,767 INFO [M:0;3456ee6a3164:33323 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:49:54,768 INFO [M:0;3456ee6a3164:33323 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:49:54,768 DEBUG [M:0;3456ee6a3164:33323 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:49:54,768 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T05:49:54,768 DEBUG [M:0;3456ee6a3164:33323 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:49:54,768 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736143344 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736143344,5,FailOnTimeoutGroup] 2024-11-16T05:49:54,768 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736143344 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736143344,5,FailOnTimeoutGroup] 2024-11-16T05:49:54,768 INFO [M:0;3456ee6a3164:33323 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:49:54,768 INFO [M:0;3456ee6a3164:33323 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:49:54,768 DEBUG [M:0;3456ee6a3164:33323 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:49:54,768 INFO [M:0;3456ee6a3164:33323 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:49:54,768 INFO [M:0;3456ee6a3164:33323 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:49:54,769 INFO [M:0;3456ee6a3164:33323 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:49:54,769 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:49:54,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:49:54,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:54,769 DEBUG [M:0;3456ee6a3164:33323 {}] zookeeper.ZKUtil(347): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T05:49:54,769 WARN [M:0;3456ee6a3164:33323 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T05:49:54,770 INFO [M:0;3456ee6a3164:33323 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/.lastflushedseqids 2024-11-16T05:49:54,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741849_1025 (size=130) 2024-11-16T05:49:54,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741849_1025 (size=130) 2024-11-16T05:49:54,775 INFO [M:0;3456ee6a3164:33323 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:49:54,775 INFO [M:0;3456ee6a3164:33323 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:49:54,775 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:49:54,775 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:54,775 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:54,775 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:49:54,775 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:54,775 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-16T05:49:54,791 DEBUG [M:0;3456ee6a3164:33323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c3b47056725146b7b35ed97510dbf3cc is 82, key is hbase:meta,,1/info:regioninfo/1731736143994/Put/seqid=0 2024-11-16T05:49:54,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741850_1026 (size=5672) 2024-11-16T05:49:54,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741850_1026 (size=5672) 2024-11-16T05:49:54,796 INFO [M:0;3456ee6a3164:33323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c3b47056725146b7b35ed97510dbf3cc 2024-11-16T05:49:54,814 DEBUG [M:0;3456ee6a3164:33323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e9e925b91e2c4783b17e1359f944ae93 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731736144481/Put/seqid=0 2024-11-16T05:49:54,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741851_1027 (size=7818) 2024-11-16T05:49:54,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741851_1027 (size=7818) 2024-11-16T05:49:54,819 INFO [M:0;3456ee6a3164:33323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e9e925b91e2c4783b17e1359f944ae93 2024-11-16T05:49:54,823 INFO [M:0;3456ee6a3164:33323 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e9e925b91e2c4783b17e1359f944ae93 2024-11-16T05:49:54,837 DEBUG [M:0;3456ee6a3164:33323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/188bc39a56d544e9ac2cc8bc6a23e061 is 69, key is 3456ee6a3164,46399,1731736143165/rs:state/1731736143401/Put/seqid=0 2024-11-16T05:49:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741852_1028 (size=5156) 2024-11-16T05:49:54,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741852_1028 (size=5156) 2024-11-16T05:49:54,842 INFO [M:0;3456ee6a3164:33323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/188bc39a56d544e9ac2cc8bc6a23e061 2024-11-16T05:49:54,863 DEBUG [M:0;3456ee6a3164:33323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d067647d740945c7984fa459e01a889f is 52, key is load_balancer_on/state:d/1731736144089/Put/seqid=0 2024-11-16T05:49:54,866 INFO [RS:0;3456ee6a3164:46399 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:49:54,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:49:54,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46399-0x1004713e1710001, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:49:54,866 INFO [RS:0;3456ee6a3164:46399 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,46399,1731736143165; zookeeper connection closed. 
2024-11-16T05:49:54,866 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@758a1d14 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@758a1d14 2024-11-16T05:49:54,866 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T05:49:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741853_1029 (size=5056) 2024-11-16T05:49:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741853_1029 (size=5056) 2024-11-16T05:49:54,867 INFO [M:0;3456ee6a3164:33323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d067647d740945c7984fa459e01a889f 2024-11-16T05:49:54,872 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c3b47056725146b7b35ed97510dbf3cc as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c3b47056725146b7b35ed97510dbf3cc 2024-11-16T05:49:54,877 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c3b47056725146b7b35ed97510dbf3cc, entries=8, sequenceid=121, filesize=5.5 K 2024-11-16T05:49:54,878 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e9e925b91e2c4783b17e1359f944ae93 as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e9e925b91e2c4783b17e1359f944ae93 2024-11-16T05:49:54,883 INFO [M:0;3456ee6a3164:33323 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e9e925b91e2c4783b17e1359f944ae93 2024-11-16T05:49:54,883 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e9e925b91e2c4783b17e1359f944ae93, entries=14, sequenceid=121, filesize=7.6 K 2024-11-16T05:49:54,884 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/188bc39a56d544e9ac2cc8bc6a23e061 as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/188bc39a56d544e9ac2cc8bc6a23e061 2024-11-16T05:49:54,889 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/188bc39a56d544e9ac2cc8bc6a23e061, entries=1, sequenceid=121, filesize=5.0 K 2024-11-16T05:49:54,890 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d067647d740945c7984fa459e01a889f as hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d067647d740945c7984fa459e01a889f 2024-11-16T05:49:54,894 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46665/user/jenkins/test-data/e0752554-59d1-fa24-0899-06e706cd0410/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d067647d740945c7984fa459e01a889f, entries=1, sequenceid=121, filesize=4.9 K 2024-11-16T05:49:54,896 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=121, compaction requested=false 2024-11-16T05:49:54,897 INFO [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:54,897 DEBUG [M:0;3456ee6a3164:33323 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736194775Disabling compacts and flushes for region at 1731736194775Disabling writes for close at 1731736194775Obtaining lock to block concurrent updates at 1731736194775Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736194775Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731736194776 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731736194776Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736194776Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736194791 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736194791Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736194800 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736194814 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736194814Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736194824 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736194836 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736194836Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736194847 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736194862 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736194862Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47b05f09: reopening flushed file at 1731736194871 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a321286: reopening flushed file at 1731736194877 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ef13bc0: reopening flushed file at 1731736194883 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4427dec2: reopening flushed file at 1731736194889 (+6 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=121, compaction requested=false at 1731736194896 (+7 ms)Writing region close event to WAL at 1731736194897 (+1 ms)Closed at 1731736194897 2024-11-16T05:49:54,898 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,898 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,898 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,898 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,898 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:49:54,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38631 is added to blk_1073741830_1006 (size=52987) 2024-11-16T05:49:54,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37139 is added to blk_1073741830_1006 (size=52987) 2024-11-16T05:49:54,901 INFO [M:0;3456ee6a3164:33323 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T05:49:54,901 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T05:49:54,901 INFO [M:0;3456ee6a3164:33323 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33323 2024-11-16T05:49:54,901 INFO [M:0;3456ee6a3164:33323 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:49:55,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:49:55,003 INFO [M:0;3456ee6a3164:33323 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:49:55,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33323-0x1004713e1710000, quorum=127.0.0.1:62607, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:49:55,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5afe5563{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:55,008 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e9e090d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:49:55,008 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:49:55,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57887c0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:49:55,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4eefceda{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir/,STOPPED} 2024-11-16T05:49:55,011 WARN [BP-1011988408-172.17.0.2-1731736142567 heartbeating to localhost/127.0.0.1:46665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:49:55,011 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:49:55,011 WARN [BP-1011988408-172.17.0.2-1731736142567 heartbeating to localhost/127.0.0.1:46665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1011988408-172.17.0.2-1731736142567 (Datanode Uuid 513f750c-11d8-4c92-982d-bbb697c36b28) service to localhost/127.0.0.1:46665 2024-11-16T05:49:55,011 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:49:55,011 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data3/current/BP-1011988408-172.17.0.2-1731736142567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:55,012 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data4/current/BP-1011988408-172.17.0.2-1731736142567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:55,012 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:49:55,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7331cedc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:55,014 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1aa3c43{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:49:55,015 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:49:55,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b792597{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:49:55,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60563caf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir/,STOPPED} 2024-11-16T05:49:55,016 WARN [BP-1011988408-172.17.0.2-1731736142567 heartbeating to localhost/127.0.0.1:46665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:49:55,016 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:49:55,017 WARN [BP-1011988408-172.17.0.2-1731736142567 heartbeating to localhost/127.0.0.1:46665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1011988408-172.17.0.2-1731736142567 (Datanode Uuid 3368ae5b-0160-440d-8c69-4e759bdc1c27) service to localhost/127.0.0.1:46665 2024-11-16T05:49:55,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:49:55,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data1/current/BP-1011988408-172.17.0.2-1731736142567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:55,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/cluster_b3319eb2-4a13-a12b-ba87-d83552692400/data/data2/current/BP-1011988408-172.17.0.2-1731736142567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:49:55,018 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:49:55,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1aa7e2ef{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:49:55,026 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ed05334{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:49:55,026 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:49:55,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45ec22ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:49:55,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70358ec4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir/,STOPPED} 2024-11-16T05:49:55,032 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:49:55,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:49:55,055 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 181) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:46665 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/3456ee6a3164:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:46665 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46665 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:46665 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:46665 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46665 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=127 (was 94) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3019 (was 2889) - AvailableMemoryMB LEAK? - 2024-11-16T05:49:55,061 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=127, ProcessCount=11, AvailableMemoryMB=3019 2024-11-16T05:49:55,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:49:55,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.log.dir so I do NOT create it in target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e 2024-11-16T05:49:55,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/74c0971c-dadc-28f0-4427-662df9703079/hadoop.tmp.dir so I do NOT create it in target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e 2024-11-16T05:49:55,061 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5, deleteOnExit=true 2024-11-16T05:49:55,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/test.cache.data in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:49:55,062 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:49:55,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:49:55,063 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:49:55,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:49:55,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:49:55,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:49:55,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:49:55,075 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:49:55,115 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:55,118 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:49:55,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:49:55,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:49:55,120 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:49:55,120 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:55,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7faafa6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:49:55,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1488736e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:49:55,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7596208e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/java.io.tmpdir/jetty-localhost-45105-hadoop-hdfs-3_4_1-tests_jar-_-any-4792453678265625597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:49:55,215 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44ecb50d{HTTP/1.1, (http/1.1)}{localhost:45105} 2024-11-16T05:49:55,215 INFO [Time-limited test {}] server.Server(415): Started @236913ms 2024-11-16T05:49:55,226 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:49:55,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:55,285 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:49:55,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:49:55,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:49:55,286 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:49:55,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f3d0298{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:49:55,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6475f7bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:49:55,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-16T05:49:55,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@624c2d5a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/java.io.tmpdir/jetty-localhost-41447-hadoop-hdfs-3_4_1-tests_jar-_-any-8448872227119829807/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:55,380 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d299317{HTTP/1.1, (http/1.1)}{localhost:41447} 2024-11-16T05:49:55,380 INFO [Time-limited test {}] server.Server(415): Started @237078ms 2024-11-16T05:49:55,381 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:49:55,407 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:49:55,409 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:49:55,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:49:55,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:49:55,413 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:49:55,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c07cbd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:49:55,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a78718c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:49:55,415 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:49:55,442 WARN [Thread-1954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data1/current/BP-314551495-172.17.0.2-1731736195079/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:55,443 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data2/current/BP-314551495-172.17.0.2-1731736195079/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:55,456 WARN [Thread-1933 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:49:55,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59afaa7303af9f0c with lease ID 0xa926067e68e3c0a8: Processing first storage report for DS-a14921f2-8698-4959-a804-2b9e37c481f5 from datanode DatanodeRegistration(127.0.0.1:36147, datanodeUuid=53f1d793-59d3-41db-a262-24c1bd191ae5, infoPort=34923, infoSecurePort=0, ipcPort=35307, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079) 2024-11-16T05:49:55,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59afaa7303af9f0c with lease ID 0xa926067e68e3c0a8: from storage DS-a14921f2-8698-4959-a804-2b9e37c481f5 node DatanodeRegistration(127.0.0.1:36147, datanodeUuid=53f1d793-59d3-41db-a262-24c1bd191ae5, infoPort=34923, infoSecurePort=0, ipcPort=35307, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:55,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59afaa7303af9f0c with lease ID 0xa926067e68e3c0a8: Processing first storage report for DS-f0e83c57-8a8f-4c48-b9a6-210c0d50d182 from datanode DatanodeRegistration(127.0.0.1:36147, datanodeUuid=53f1d793-59d3-41db-a262-24c1bd191ae5, infoPort=34923, infoSecurePort=0, ipcPort=35307, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079) 2024-11-16T05:49:55,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59afaa7303af9f0c with lease ID 0xa926067e68e3c0a8: from storage DS-f0e83c57-8a8f-4c48-b9a6-210c0d50d182 node DatanodeRegistration(127.0.0.1:36147, datanodeUuid=53f1d793-59d3-41db-a262-24c1bd191ae5, infoPort=34923, infoSecurePort=0, ipcPort=35307, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:55,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@506744b5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/java.io.tmpdir/jetty-localhost-42877-hadoop-hdfs-3_4_1-tests_jar-_-any-17228513769692662668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:49:55,510 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c0fa0ef{HTTP/1.1, (http/1.1)}{localhost:42877} 2024-11-16T05:49:55,510 INFO [Time-limited test {}] server.Server(415): Started @237208ms 2024-11-16T05:49:55,511 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:49:55,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:55,567 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data3/current/BP-314551495-172.17.0.2-1731736195079/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:55,567 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data4/current/BP-314551495-172.17.0.2-1731736195079/current, will proceed with Du for space computation calculation, 2024-11-16T05:49:55,583 WARN [Thread-1969 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:49:55,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81462fa86ee59e48 with lease ID 0xa926067e68e3c0a9: Processing first storage report for DS-8e1e1687-f658-4d77-ae1d-38897fc1a109 from datanode DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ea03aaf1-ba5c-48cc-ac5a-cbf7f546781f, infoPort=45045, infoSecurePort=0, ipcPort=43053, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079) 2024-11-16T05:49:55,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81462fa86ee59e48 with lease ID 0xa926067e68e3c0a9: from storage DS-8e1e1687-f658-4d77-ae1d-38897fc1a109 node DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ea03aaf1-ba5c-48cc-ac5a-cbf7f546781f, infoPort=45045, infoSecurePort=0, ipcPort=43053, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:55,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81462fa86ee59e48 with lease ID 0xa926067e68e3c0a9: Processing first storage report for DS-161d9785-b53b-4ed1-a4d4-cb5b4df468f7 from datanode DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ea03aaf1-ba5c-48cc-ac5a-cbf7f546781f, infoPort=45045, infoSecurePort=0, ipcPort=43053, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079) 2024-11-16T05:49:55,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81462fa86ee59e48 with lease ID 0xa926067e68e3c0a9: from storage DS-161d9785-b53b-4ed1-a4d4-cb5b4df468f7 node DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ea03aaf1-ba5c-48cc-ac5a-cbf7f546781f, infoPort=45045, infoSecurePort=0, ipcPort=43053, storageInfo=lv=-57;cid=testClusterID;nsid=1017971573;c=1731736195079), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:49:55,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e 2024-11-16T05:49:55,640 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/zookeeper_0, clientPort=50868, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:49:55,642 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50868 2024-11-16T05:49:55,642 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,644 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:49:55,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:49:55,654 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869 with version=8 2024-11-16T05:49:55,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:49:55,655 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:49:55,655 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:49:55,656 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40857 2024-11-16T05:49:55,657 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40857 connecting to ZooKeeper ensemble=127.0.0.1:50868 2024-11-16T05:49:55,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:408570x0, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:49:55,663 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40857-0x1004714aea50000 connected 2024-11-16T05:49:55,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,677 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:49:55,680 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869, hbase.cluster.distributed=false 2024-11-16T05:49:55,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:49:55,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40857 2024-11-16T05:49:55,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40857 2024-11-16T05:49:55,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40857 2024-11-16T05:49:55,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40857 2024-11-16T05:49:55,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40857 2024-11-16T05:49:55,700 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:49:55,700 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:49:55,701 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46129 2024-11-16T05:49:55,702 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46129 connecting to ZooKeeper ensemble=127.0.0.1:50868 2024-11-16T05:49:55,703 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,704 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461290x0, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:49:55,707 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46129-0x1004714aea50001 connected 2024-11-16T05:49:55,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:49:55,707 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:49:55,709 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:49:55,709 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:49:55,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:49:55,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46129 2024-11-16T05:49:55,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46129 2024-11-16T05:49:55,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46129 2024-11-16T05:49:55,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46129 2024-11-16T05:49:55,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46129 2024-11-16T05:49:55,725 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:40857 2024-11-16T05:49:55,725 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:55,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:55,727 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:49:55,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,728 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:49:55,729 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,40857,1731736195655 from backup master directory 2024-11-16T05:49:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:49:55,729 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T05:49:55,729 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,734 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/hbase.id] with ID: d2e938dc-ac10-49c7-9896-797540c38930 2024-11-16T05:49:55,734 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/.tmp/hbase.id 2024-11-16T05:49:55,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:49:55,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:49:55,741 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/.tmp/hbase.id]:[hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/hbase.id] 2024-11-16T05:49:55,752 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:55,752 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T05:49:55,753 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T05:49:55,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:49:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:49:55,763 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:49:55,764 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:49:55,764 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:49:55,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:49:55,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:49:55,773 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store 2024-11-16T05:49:55,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:49:55,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:49:55,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:55,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:49:55,780 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:55,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:55,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:49:55,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:49:55,780 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T05:49:55,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736195780Disabling compacts and flushes for region at 1731736195780Disabling writes for close at 1731736195780Writing region close event to WAL at 1731736195780Closed at 1731736195780 2024-11-16T05:49:55,781 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/.initializing 2024-11-16T05:49:55,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/WALs/3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C40857%2C1731736195655, suffix=, logDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/WALs/3456ee6a3164,40857,1731736195655, archiveDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/oldWALs, maxLogs=10 2024-11-16T05:49:55,785 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C40857%2C1731736195655.1731736195784 2024-11-16T05:49:55,789 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/WALs/3456ee6a3164,40857,1731736195655/3456ee6a3164%2C40857%2C1731736195655.1731736195784 2024-11-16T05:49:55,790 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34923:34923),(127.0.0.1/127.0.0.1:45045:45045)] 2024-11-16T05:49:55,791 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:49:55,791 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:55,791 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,791 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:49:55,793 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:55,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:49:55,795 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:55,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:49:55,797 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:55,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:49:55,799 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:55,799 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,800 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,800 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,801 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T05:49:55,802 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:49:55,804 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:49:55,804 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772363, jitterRate=-0.017890408635139465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:49:55,805 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731736195791Initializing all the Stores at 1731736195792 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736195792Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736195792Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736195792Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736195792Cleaning up temporary data from old regions at 1731736195801 (+9 ms)Region opened successfully at 1731736195805 (+4 ms) 2024-11-16T05:49:55,806 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:49:55,808 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3684b39f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:49:55,809 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T05:49:55,809 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:49:55,809 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:49:55,809 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:49:55,810 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T05:49:55,810 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T05:49:55,810 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:49:55,812 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:49:55,813 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:49:55,815 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:49:55,815 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:49:55,816 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:49:55,816 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:49:55,817 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:49:55,818 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:49:55,818 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:49:55,819 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:49:55,820 DEBUG 
[master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:49:55,821 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:49:55,822 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:49:55,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:49:55,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,824 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,40857,1731736195655, sessionid=0x1004714aea50000, setting cluster-up flag (Was=false) 2024-11-16T05:49:55,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:49:55,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,827 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:49:55,828 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:55,832 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T05:49:55,833 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,40857,1731736195655 2024-11-16T05:49:55,834 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:49:55,836 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:55,837 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:49:55,837 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T05:49:55,837 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,40857,1731736195655 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:49:55,838 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:49:55,839 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:49:55,842 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:55,842 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:49:55,843 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,844 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:49:55,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731736225845 2024-11-16T05:49:55,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:49:55,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:49:55,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:49:55,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:49:55,845 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:49:55,846 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T05:49:55,848 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,848 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:49:55,848 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:49:55,848 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:49:55,849 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:49:55,849 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:49:55,849 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736195849,5,FailOnTimeoutGroup] 2024-11-16T05:49:55,850 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736195849,5,FailOnTimeoutGroup] 2024-11-16T05:49:55,850 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,850 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:49:55,851 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,851 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
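
    [Illustrative sketch, not part of the captured log.] The HMaster record above notes that reopening regions with very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is set above zero. A minimal, hedged Java sketch of setting that key before starting a cluster; the property name is taken verbatim from the log message, while the class name and the value 3 are made up for illustration:

    // Sketch only: enable the storeFileRefCount-based region reopen check the log reports as disabled.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableRefCountRecovery {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml if present
        // Any value > 0 enables the check, per the HMaster log message; 3 is an arbitrary example.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }
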
2024-11-16T05:49:55,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:49:55,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:49:55,856 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:49:55,856 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869 2024-11-16T05:49:55,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:49:55,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:49:55,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:55,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:49:55,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:49:55,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:55,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:49:55,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:49:55,872 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:55,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:49:55,873 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:49:55,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:55,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:49:55,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:49:55,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:55,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:55,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:49:55,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740 2024-11-16T05:49:55,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740 2024-11-16T05:49:55,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:49:55,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:49:55,878 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
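
    [Illustrative sketch, not part of the captured log.] The PEWorker-1 records above print the hbase:meta table descriptor: column families with VERSIONS=3, ROW_INDEX_V1 data block encoding, ROWCOL bloom filters, IN_MEMORY=true, 8 KB blocks, and the MultiRowMutationEndpoint coprocessor. Assuming the standard HBase client API, a comparable descriptor for an ordinary (non-meta) table could be built roughly like this; the table name demo_meta_like is invented for the example:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        // Same coprocessor class the log shows being attached to hbase:meta.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_meta_like"))
            .setColumnFamily(info)
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }
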
2024-11-16T05:49:55,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:49:55,882 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:49:55,882 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695513, jitterRate=-0.11561024188995361}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:49:55,883 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731736195868Initializing all the Stores at 1731736195869 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736195869Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736195869Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736195869Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736195869Cleaning up temporary data from old regions at 1731736195877 (+8 ms)Region opened successfully at 1731736195883 (+6 ms) 2024-11-16T05:49:55,883 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:49:55,883 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:49:55,883 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:49:55,883 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:49:55,883 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:49:55,883 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:49:55,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736195883Disabling compacts and flushes for region at 1731736195883Disabling writes for close at 1731736195883Writing region close 
event to WAL at 1731736195883Closed at 1731736195883 2024-11-16T05:49:55,885 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:55,885 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T05:49:55,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:49:55,887 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:49:55,888 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:49:55,917 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(746): ClusterId : d2e938dc-ac10-49c7-9896-797540c38930 2024-11-16T05:49:55,917 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:49:55,920 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:49:55,920 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:49:55,923 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T05:49:55,923 DEBUG [RS:0;3456ee6a3164:46129 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a91dde2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:49:55,938 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:46129 2024-11-16T05:49:55,939 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:49:55,939 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:49:55,939 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T05:49:55,939 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,40857,1731736195655 with port=46129, startcode=1731736195700 2024-11-16T05:49:55,939 DEBUG [RS:0;3456ee6a3164:46129 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:49:55,941 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56477, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:49:55,942 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40857 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,46129,1731736195700 2024-11-16T05:49:55,942 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40857 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,46129,1731736195700 2024-11-16T05:49:55,943 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869 2024-11-16T05:49:55,943 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39651 2024-11-16T05:49:55,943 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:49:55,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:49:55,945 DEBUG [RS:0;3456ee6a3164:46129 {}] zookeeper.ZKUtil(111): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,46129,1731736195700 2024-11-16T05:49:55,945 WARN [RS:0;3456ee6a3164:46129 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T05:49:55,945 INFO [RS:0;3456ee6a3164:46129 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:49:55,945 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700 2024-11-16T05:49:55,946 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,46129,1731736195700] 2024-11-16T05:49:55,949 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:49:55,950 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:49:55,950 INFO [RS:0;3456ee6a3164:46129 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:49:55,950 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T05:49:55,951 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:49:55,951 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:49:55,951 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:49:55,952 DEBUG [RS:0;3456ee6a3164:46129 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:49:55,952 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T05:49:55,952 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,952 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,952 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,953 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,953 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,46129,1731736195700-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:49:55,966 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:49:55,966 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,46129,1731736195700-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,966 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,966 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.Replication(171): 3456ee6a3164,46129,1731736195700 started 2024-11-16T05:49:55,978 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:55,978 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,46129,1731736195700, RpcServer on 3456ee6a3164/172.17.0.2:46129, sessionid=0x1004714aea50001 2024-11-16T05:49:55,978 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:49:55,978 DEBUG [RS:0;3456ee6a3164:46129 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,46129,1731736195700 2024-11-16T05:49:55,978 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,46129,1731736195700' 2024-11-16T05:49:55,978 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:49:55,979 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:49:55,979 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:49:55,979 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:49:55,979 DEBUG [RS:0;3456ee6a3164:46129 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,46129,1731736195700 2024-11-16T05:49:55,979 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,46129,1731736195700' 2024-11-16T05:49:55,979 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:49:55,980 DEBUG 
[RS:0;3456ee6a3164:46129 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:49:55,980 DEBUG [RS:0;3456ee6a3164:46129 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:49:55,980 INFO [RS:0;3456ee6a3164:46129 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:49:55,980 INFO [RS:0;3456ee6a3164:46129 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T05:49:56,038 WARN [3456ee6a3164:40857 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T05:49:56,084 INFO [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C46129%2C1731736195700, suffix=, logDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700, archiveDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/oldWALs, maxLogs=32 2024-11-16T05:49:56,085 INFO [RS:0;3456ee6a3164:46129 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46129%2C1731736195700.1731736196085 2024-11-16T05:49:56,097 INFO [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736196085 2024-11-16T05:49:56,101 DEBUG [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34923:34923),(127.0.0.1/127.0.0.1:45045:45045)] 2024-11-16T05:49:56,288 DEBUG [3456ee6a3164:40857 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:49:56,289 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:49:56,291 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,46129,1731736195700, state=OPENING 2024-11-16T05:49:56,293 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:49:56,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:56,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:49:56,296 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:49:56,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,46129,1731736195700}] 2024-11-16T05:49:56,296 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:56,296 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:56,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:49:56,451 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:49:56,453 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50401, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:49:56,458 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:49:56,458 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:49:56,460 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C46129%2C1731736195700.meta, suffix=.meta, logDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700, archiveDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/oldWALs, maxLogs=32 2024-11-16T05:49:56,461 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46129%2C1731736195700.meta.1731736196461.meta 2024-11-16T05:49:56,468 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.meta.1731736196461.meta 2024-11-16T05:49:56,469 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45045:45045),(127.0.0.1/127.0.0.1:34923:34923)] 2024-11-16T05:49:56,471 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:49:56,471 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:49:56,471 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:49:56,472 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T05:49:56,472 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:49:56,472 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:56,472 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:49:56,472 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:49:56,474 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:49:56,475 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:49:56,475 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:56,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:56,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:49:56,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:49:56,478 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:56,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:56,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:49:56,480 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:49:56,480 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:56,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:49:56,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:49:56,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:49:56,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:56,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T05:49:56,482 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:49:56,483 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740 2024-11-16T05:49:56,485 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740 2024-11-16T05:49:56,486 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:49:56,486 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:49:56,487 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:49:56,488 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:49:56,489 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808555, jitterRate=0.028131186962127686}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:49:56,489 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:49:56,489 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731736196472Writing region info on filesystem at 1731736196472Initializing all the Stores at 1731736196473 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736196473Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736196474 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736196474Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736196474Cleaning up temporary data from old regions at 1731736196486 (+12 ms)Running coprocessor post-open hooks at 1731736196489 (+3 ms)Region opened successfully at 1731736196489 2024-11-16T05:49:56,490 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731736196450 2024-11-16T05:49:56,492 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:49:56,492 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:49:56,493 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:49:56,494 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,46129,1731736195700, state=OPEN 2024-11-16T05:49:56,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:49:56,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:49:56,499 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,46129,1731736195700 2024-11-16T05:49:56,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:56,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:49:56,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:49:56,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,46129,1731736195700 in 203 msec 2024-11-16T05:49:56,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:49:56,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-16T05:49:56,504 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:49:56,504 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:49:56,506 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:49:56,506 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,46129,1731736195700, seqNum=-1] 2024-11-16T05:49:56,506 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:49:56,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51543, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:49:56,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 675 msec 2024-11-16T05:49:56,512 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731736196512, completionTime=-1 2024-11-16T05:49:56,512 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:49:56,512 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:49:56,514 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:49:56,514 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736256514 2024-11-16T05:49:56,514 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736316514 2024-11-16T05:49:56,514 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-16T05:49:56,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40857,1731736195655-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:56,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40857,1731736195655-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:56,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40857,1731736195655-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:56,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:40857, period=300000, unit=MILLISECONDS is enabled. 
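
    [Illustrative sketch, not part of the captured log.] The InitMetaProcedure step above creates the built-in 'default' and 'hbase' namespaces. For reference, client code can create and list namespaces through the Admin API roughly as follows; the namespace name demo_ns is invented, since 'default' and 'hbase' already exist once the procedure finishes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // 'demo_ns' is a made-up example namespace.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());   // expect: default, hbase, demo_ns
          }
        }
      }
    }
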
2024-11-16T05:49:56,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:56,515 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:56,517 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.790sec 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40857,1731736195655-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:49:56,519 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40857,1731736195655-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:49:56,521 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:49:56,521 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:49:56,521 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40857,1731736195655-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:49:56,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:56,617 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3716c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:49:56,617 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,40857,-1 for getting cluster id 2024-11-16T05:49:56,617 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:49:56,619 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd2e938dc-ac10-49c7-9896-797540c38930' 2024-11-16T05:49:56,619 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:49:56,619 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d2e938dc-ac10-49c7-9896-797540c38930" 2024-11-16T05:49:56,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66da33dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:49:56,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,40857,-1] 2024-11-16T05:49:56,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 
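The Close-WAL-Writer-0 WARN and stack trace above come from RecoverLeaseFSUtils retrying lease recovery on an old WAL after the DFSClient behind that FileSystem instance has already been shut down, so every reflective isFileClosed probe fails with "Filesystem closed". The sketch below shows only the underlying HDFS calls that the HBase helper wraps (recoverLease plus isFileClosed polling), not the helper itself; the class name, the 1-second sleep, and passing the WAL path as a program argument are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoveryProbe {
  // Polls the NameNode until it reports the file's lease recovered and the file closed.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);        // asks the NameNode to start lease recovery
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                            // roughly the ~1s retry cadence visible in the log
      recovered = dfs.isFileClosed(wal);              // the call that throws "Filesystem closed" above
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:36821"); // the mini-DFS address printed in the WARN
    try (FileSystem fs = FileSystem.newInstance(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path wal = new Path(args[0]);                   // pass the WAL path from the WARN line
      System.out.println("lease recovered: " + recoverLease(dfs, wal, 60_000L));
    }
  }
}

The important point for reading the rest of this log: because the failure is "Filesystem closed" on the client side, the retries can never succeed, which is why the same trace keeps reappearing every second or so below.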
2024-11-16T05:49:56,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:49:56,621 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50278, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:49:56,622 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@579c8763, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:49:56,623 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:49:56,624 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,46129,1731736195700, seqNum=-1] 2024-11-16T05:49:56,624 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:49:56,626 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:49:56,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,40857,1731736195655 2024-11-16T05:49:56,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:49:56,631 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:49:56,631 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T05:49:56,632 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 3456ee6a3164,40857,1731736195655 2024-11-16T05:49:56,632 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5e6faf81 2024-11-16T05:49:56,632 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T05:49:56,633 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50286, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T05:49:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T05:49:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
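The two TableDescriptorChecker warnings show that the test asks for a table with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes, WAL rolls and splits happen quickly. A hedged sketch of building such a descriptor through the public Admin API follows; it reuses the values and table name from the log but is not the test's actual code, and note that on a cluster with default table sanity checks a request like this may be rejected outright rather than merely warned about, as it is here.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTinyRegionTable {
  public static void main(String[] args) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setMaxFileSize(786432L)        // the value TableDescriptorChecker flags as too small
        .setMemStoreFlushSize(8192L)    // ditto: forces very frequent flushes, only sensible in tests
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc);          // kicks off a CreateTableProcedure like pid=4 below
    }
  }
}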
2024-11-16T05:49:56,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:49:56,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-16T05:49:56,637 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T05:49:56,637 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:56,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-16T05:49:56,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:49:56,638 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T05:49:56,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741835_1011 (size=381) 2024-11-16T05:49:56,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741835_1011 (size=381) 2024-11-16T05:49:56,645 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ee760dfd2eb02f6e118668b4ae1eee46, NAME => 'TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869 2024-11-16T05:49:56,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741836_1012 (size=64) 2024-11-16T05:49:56,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741836_1012 (size=64) 2024-11-16T05:49:56,653 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:56,654 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ee760dfd2eb02f6e118668b4ae1eee46, disabling compactions & flushes 2024-11-16T05:49:56,654 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:49:56,654 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:49:56,654 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. after waiting 0 ms 2024-11-16T05:49:56,654 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:49:56,654 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:49:56,654 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ee760dfd2eb02f6e118668b4ae1eee46: Waiting for close lock at 1731736196654Disabling compacts and flushes for region at 1731736196654Disabling writes for close at 1731736196654Writing region close event to WAL at 1731736196654Closed at 1731736196654 2024-11-16T05:49:56,656 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T05:49:56,656 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731736196656"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731736196656"}]},"ts":"1731736196656"} 2024-11-16T05:49:56,658 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
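At this point CreateTableProcedure has written the new region's info:regioninfo and info:state cells into hbase:meta ("Added 1 regions to meta"). To check the result from a client, a small sketch using Admin.getRegions, which is answered from those same meta rows, is shown below; the class name and output format are mine, not part of the test.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListTableRegions {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Lists every region of the table, e.g. the single ee760dfd... region created above.
      List<RegionInfo> regions = admin.getRegions(tn);
      for (RegionInfo ri : regions) {
        System.out.println(ri.getEncodedName()
            + " [" + Bytes.toStringBinary(ri.getStartKey())
            + ", " + Bytes.toStringBinary(ri.getEndKey()) + ")");
      }
    }
  }
}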
2024-11-16T05:49:56,659 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T05:49:56,660 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736196659"}]},"ts":"1731736196659"} 2024-11-16T05:49:56,662 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-16T05:49:56,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, ASSIGN}] 2024-11-16T05:49:56,664 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, ASSIGN 2024-11-16T05:49:56,665 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, ASSIGN; state=OFFLINE, location=3456ee6a3164,46129,1731736195700; forceNewPlan=false, retain=false 2024-11-16T05:49:56,816 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ee760dfd2eb02f6e118668b4ae1eee46, regionState=OPENING, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:49:56,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, ASSIGN because future has completed 2024-11-16T05:49:56,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ee760dfd2eb02f6e118668b4ae1eee46, server=3456ee6a3164,46129,1731736195700}] 2024-11-16T05:49:56,980 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
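The ASSIGN TransitRegionStateProcedure (pid=5) and its OpenRegionProcedure child (pid=6) move the new region from OFFLINE to OPEN on 3456ee6a3164,46129. A plain client cannot observe those procedure states directly, but it can poll for the visible outcome; the sketch below waits until the table is available and then prints where the single region landed. The polling interval and the class name are assumptions for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignment {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Spin until the ASSIGN procedure above has brought the region to OPEN somewhere.
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(200L);
      }
      HRegionLocation loc = locator.getRegionLocation(new byte[0], true);
      System.out.println("region " + loc.getRegion().getEncodedName()
          + " is open on " + loc.getServerName());
    }
  }
}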
2024-11-16T05:49:56,980 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ee760dfd2eb02f6e118668b4ae1eee46, NAME => 'TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:49:56,980 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,980 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:49:56,981 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,981 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,983 INFO [StoreOpener-ee760dfd2eb02f6e118668b4ae1eee46-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,985 INFO [StoreOpener-ee760dfd2eb02f6e118668b4ae1eee46-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee760dfd2eb02f6e118668b4ae1eee46 columnFamilyName info 2024-11-16T05:49:56,986 DEBUG [StoreOpener-ee760dfd2eb02f6e118668b4ae1eee46-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:49:56,986 INFO [StoreOpener-ee760dfd2eb02f6e118668b4ae1eee46-1 {}] regionserver.HStore(327): Store=ee760dfd2eb02f6e118668b4ae1eee46/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:49:56,987 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,988 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,989 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,990 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,990 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,993 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,997 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:49:56,997 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ee760dfd2eb02f6e118668b4ae1eee46; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808970, jitterRate=0.028658926486968994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:49:56,998 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:49:56,998 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ee760dfd2eb02f6e118668b4ae1eee46: Running coprocessor pre-open hook at 1731736196981Writing region info on filesystem at 1731736196981Initializing all the Stores at 1731736196982 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736196982Cleaning up temporary data from old regions at 1731736196990 (+8 ms)Running coprocessor post-open hooks at 1731736196998 (+8 ms)Region opened successfully at 1731736196998 2024-11-16T05:49:57,000 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., pid=6, masterSystemTime=1731736196974 2024-11-16T05:49:57,002 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
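When the region opens, the split policy line reports desiredMaxFileSize=808970 with jitterRate of about 0.0287 against the hbase.hregion.max.filesize of 786432 flagged in the earlier warning; the jitter is applied roughly as size * (1 + jitterRate), truncated to a long. The tiny sketch below simply reproduces that arithmetic as a sanity check; the exact rounding inside ConstantSizeRegionSplitPolicy may differ by a byte or so.

public class SplitSizeJitter {
  public static void main(String[] args) {
    long configuredMax = 786432L;               // hbase.hregion.max.filesize from the WARN above
    double jitterRate = 0.028658926486968994;   // jitterRate printed when the region opened
    long desired = configuredMax + (long) (configuredMax * jitterRate);
    System.out.println(desired);                // prints 808970, matching desiredMaxFileSize in the log
  }
}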
2024-11-16T05:49:57,002 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:49:57,003 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ee760dfd2eb02f6e118668b4ae1eee46, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:49:57,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ee760dfd2eb02f6e118668b4ae1eee46, server=3456ee6a3164,46129,1731736195700 because future has completed 2024-11-16T05:49:57,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T05:49:57,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ee760dfd2eb02f6e118668b4ae1eee46, server=3456ee6a3164,46129,1731736195700 in 186 msec 2024-11-16T05:49:57,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T05:49:57,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, ASSIGN in 350 msec 2024-11-16T05:49:57,018 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T05:49:57,019 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731736197019"}]},"ts":"1731736197019"} 2024-11-16T05:49:57,022 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-16T05:49:57,023 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T05:49:57,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 390 msec 2024-11-16T05:49:57,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:57,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:58,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:58,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:59,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:59,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:49:59,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:59,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:59,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:49:59,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T05:50:00,113 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-16T05:50:00,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:00,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:00,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:01,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:01,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:01,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T05:50:01,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-16T05:50:02,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:02,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:03,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:03,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:04,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T05:50:04,305 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T05:50:04,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T05:50:04,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:04,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:05,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:05,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:06,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:06,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:06,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40857 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T05:50:06,649 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-16T05:50:06,649 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-16T05:50:06,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-16T05:50:06,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:50:06,656 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., hostname=3456ee6a3164,46129,1731736195700, seqNum=2] 2024-11-16T05:50:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:06,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:06,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/9e8ab5fdacd8467981015790afe2aff8 is 1080, key is row0001/info:/1731736206657/Put/seqid=0 2024-11-16T05:50:06,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741837_1013 (size=12509) 2024-11-16T05:50:06,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741837_1013 (size=12509) 2024-11-16T05:50:06,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/9e8ab5fdacd8467981015790afe2aff8 2024-11-16T05:50:06,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/9e8ab5fdacd8467981015790afe2aff8 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/9e8ab5fdacd8467981015790afe2aff8 2024-11-16T05:50:06,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/9e8ab5fdacd8467981015790afe2aff8, entries=7, sequenceid=11, filesize=12.2 K 
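The run of Close-WAL-Writer-0 WARN entries above is the WAL close path repeatedly trying to recover the lease on an old WAL file: RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed through reflection (hence the GeneratedMethodAccessor frames), and every attempt dies with "Filesystem closed" because the DFSClient behind that mini-cluster filesystem has already been shut down, so the utility just logs the failure and retries roughly once a second. Purely as a rough sketch of that probe-and-wait pattern, not the actual RecoverLeaseFSUtils code, and with a made-up path, poll interval, and timeout, the public DistributedFileSystem API it ultimately calls looks like this:

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryProbeSketch {
      // Hypothetical values; the real timeouts come from RecoverLeaseFSUtils configuration.
      private static final long POLL_MS = 1000L;
      private static final long TIMEOUT_MS = 15 * 60 * 1000L;

      static boolean waitForLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + TIMEOUT_MS;
        // Ask the NameNode to start lease recovery; returns true if the file is already closed.
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(POLL_MS);
          try {
            // This is the call the WARN entries above reach through reflection.
            recovered = dfs.isFileClosed(wal);
          } catch (IOException e) {
            // "Filesystem closed" lands here when the DFSClient was shut down underneath us;
            // the real utility logs "Failed invocation" and keeps retrying, as seen above.
          }
        }
        return recovered;
      }

      public static void main(String[] args) throws Exception {
        // Hypothetical cluster and WAL path, for illustration only.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        System.out.println(waitForLease(dfs, new Path("/hbase/WALs/example-wal")));
      }
    }

The reflection indirection in the real utility is presumably there so the same code can run against Hadoop versions that lack isFileClosed; when the method exists, as in this run, the call goes straight through to the HDFS client.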
2024-11-16T05:50:06,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for ee760dfd2eb02f6e118668b4ae1eee46 in 42ms, sequenceid=11, compaction requested=false 2024-11-16T05:50:06,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:06,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:06,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-16T05:50:06,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/c847fb20aa7f4937a1738d84085bf76b is 1080, key is row0008/info:/1731736206677/Put/seqid=0 2024-11-16T05:50:06,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741838_1014 (size=27607) 2024-11-16T05:50:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741838_1014 (size=27607) 2024-11-16T05:50:06,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/c847fb20aa7f4937a1738d84085bf76b 2024-11-16T05:50:06,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/c847fb20aa7f4937a1738d84085bf76b as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b 2024-11-16T05:50:06,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b, entries=21, sequenceid=35, filesize=27.0 K 2024-11-16T05:50:06,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for ee760dfd2eb02f6e118668b4ae1eee46 in 20ms, sequenceid=35, compaction requested=false 2024-11-16T05:50:06,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:06,738 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.2 K, sizeToCheck=16.0 K 2024-11-16T05:50:06,738 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:06,739 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b because midkey is the same as first or last row 2024-11-16T05:50:07,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:07,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:08,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:08,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:08,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:08,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/2d36e9519b34493dae68d01e5b5580ea is 1080, key is row0029/info:/1731736206719/Put/seqid=0 2024-11-16T05:50:08,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741839_1015 (size=12509) 2024-11-16T05:50:08,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741839_1015 (size=12509) 2024-11-16T05:50:08,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/2d36e9519b34493dae68d01e5b5580ea 2024-11-16T05:50:08,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/2d36e9519b34493dae68d01e5b5580ea as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/2d36e9519b34493dae68d01e5b5580ea 2024-11-16T05:50:08,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/2d36e9519b34493dae68d01e5b5580ea, entries=7, sequenceid=45, filesize=12.2 K 2024-11-16T05:50:08,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for ee760dfd2eb02f6e118668b4ae1eee46 in 27ms, sequenceid=45, compaction requested=true 2024-11-16T05:50:08,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:08,762 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=51.4 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,762 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,762 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b because midkey is the same as first or last row 2024-11-16T05:50:08,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee760dfd2eb02f6e118668b4ae1eee46:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-16T05:50:08,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:08,762 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:08,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T05:50:08,763 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 52625 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:08,764 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): ee760dfd2eb02f6e118668b4ae1eee46/info is initiating minor compaction (all files) 2024-11-16T05:50:08,764 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ee760dfd2eb02f6e118668b4ae1eee46/info in TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:50:08,764 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/9e8ab5fdacd8467981015790afe2aff8, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/2d36e9519b34493dae68d01e5b5580ea] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp, totalSize=51.4 K 2024-11-16T05:50:08,764 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e8ab5fdacd8467981015790afe2aff8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731736206657 2024-11-16T05:50:08,765 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting c847fb20aa7f4937a1738d84085bf76b, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=35, earliestPutTs=1731736206677 2024-11-16T05:50:08,765 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2d36e9519b34493dae68d01e5b5580ea, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731736206719 2024-11-16T05:50:08,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/3b8710c33d894279af2ec02670779446 is 1080, key is row0036/info:/1731736208735/Put/seqid=0 
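As a cross-check of the compaction selection above: the "3 files of size 52625" chosen by ExploringCompactionPolicy is simply the sum of the three flushed HFiles reported earlier (12509 + 27607 + 12509 bytes), which is the 51.4 K printed as totalSize. A trivial check:

    public class CompactionSizeCheck {
      public static void main(String[] args) {
        // Sizes reported by the addStoredBlock / "Added ..." entries above, in bytes.
        long[] selected = { 12509L, 27607L, 12509L };
        long total = 0;
        for (long size : selected) {
          total += size;
        }
        System.out.println(total);                       // 52625, matching ExploringCompactionPolicy
        System.out.printf("%.1f K%n", total / 1024.0);   // 51.4 K, matching totalSize in the log
      }
    }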
2024-11-16T05:50:08,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741840_1016 (size=17894) 2024-11-16T05:50:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741840_1016 (size=17894) 2024-11-16T05:50:08,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/3b8710c33d894279af2ec02670779446 2024-11-16T05:50:08,780 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee760dfd2eb02f6e118668b4ae1eee46#info#compaction#59 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:08,781 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/18945e0387b64ddb92f1f6d2af522ec4 is 1080, key is row0001/info:/1731736206657/Put/seqid=0 2024-11-16T05:50:08,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/3b8710c33d894279af2ec02670779446 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/3b8710c33d894279af2ec02670779446 2024-11-16T05:50:08,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/3b8710c33d894279af2ec02670779446, entries=12, sequenceid=60, filesize=17.5 K 2024-11-16T05:50:08,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for ee760dfd2eb02f6e118668b4ae1eee46 in 24ms, sequenceid=60, compaction requested=false 2024-11-16T05:50:08,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:08,788 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,788 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,788 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b because midkey is the same as first or last row 2024-11-16T05:50:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:08,789 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T05:50:08,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741841_1017 (size=42824) 2024-11-16T05:50:08,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741841_1017 (size=42824) 2024-11-16T05:50:08,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/20c940d1210e4eec99540065aadebbdc is 1080, key is row0048/info:/1731736208764/Put/seqid=0 2024-11-16T05:50:08,800 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/18945e0387b64ddb92f1f6d2af522ec4 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 2024-11-16T05:50:08,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741842_1018 (size=16817) 2024-11-16T05:50:08,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741842_1018 (size=16817) 2024-11-16T05:50:08,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/20c940d1210e4eec99540065aadebbdc 2024-11-16T05:50:08,808 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ee760dfd2eb02f6e118668b4ae1eee46/info of ee760dfd2eb02f6e118668b4ae1eee46 into 18945e0387b64ddb92f1f6d2af522ec4(size=41.8 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
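The repeated pairs of "Should split because region size is big enough sumSize=... sizeToCheck=16.0 K" and "cannot split ... because midkey is the same as first or last row" above come down to a two-step decision: the size threshold passes, but the candidate split point taken from the largest store file coincides with its first or last row, so the split is refused. A simplified illustration of that shape only, not HBase's actual split-policy classes, with illustrative sizes and row keys:

    import java.util.Arrays;

    public class SplitDecisionSketch {
      /**
       * Simplified two-step split decision mirroring what the DEBUG entries above report:
       * first a size threshold, then a usable-midkey check. All values are illustrative.
       */
      static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes,
          byte[] midKey, byte[] firstKey, byte[] lastKey) {
        if (storeSizeBytes <= sizeToCheckBytes) {
          return false; // region not big enough yet
        }
        // "cannot split ... because midkey is the same as first or last row"
        if (midKey == null
            || Arrays.equals(midKey, firstKey)
            || Arrays.equals(midKey, lastKey)) {
          return false;
        }
        return true;
      }

      public static void main(String[] args) {
        byte[] first = "row0001".getBytes();
        byte[] last = "row0058".getBytes();
        // A 59.3 K store against the 16.0 K sizeToCheck from the log: the size check passes,
        // but a midkey equal to the first row still blocks the split.
        System.out.println(shouldSplit(60718L, 16384L, first, first, last));                    // false
        System.out.println(shouldSplit(60718L, 16384L, "row0030".getBytes(), first, last));     // true
      }
    }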
2024-11-16T05:50:08,808 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:08,808 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., storeName=ee760dfd2eb02f6e118668b4ae1eee46/info, priority=13, startTime=1731736208762; duration=0sec 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 because midkey is the same as first or last row 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 because midkey is the same as first or last row 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 because midkey is the same as first or last row 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:08,809 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee760dfd2eb02f6e118668b4ae1eee46:info 2024-11-16T05:50:08,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/20c940d1210e4eec99540065aadebbdc as 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/20c940d1210e4eec99540065aadebbdc 2024-11-16T05:50:08,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/20c940d1210e4eec99540065aadebbdc, entries=11, sequenceid=74, filesize=16.4 K 2024-11-16T05:50:08,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for ee760dfd2eb02f6e118668b4ae1eee46 in 27ms, sequenceid=74, compaction requested=true 2024-11-16T05:50:08,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:08,816 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.7 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,816 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,816 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 because midkey is the same as first or last row 2024-11-16T05:50:08,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee760dfd2eb02f6e118668b4ae1eee46:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:08,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:08,816 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:08,817 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:08,818 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): ee760dfd2eb02f6e118668b4ae1eee46/info is initiating minor compaction (all files) 2024-11-16T05:50:08,818 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ee760dfd2eb02f6e118668b4ae1eee46/info in TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
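The minor compaction starting above rewrites the three selected HFiles into one new file; since store files are already sorted by key, the core of the operation is a merge of sorted inputs into a single sorted output. Purely as a toy illustration of that shape, with plain lists of row keys standing in for store-file scanners and nothing taken from the real Compactor:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.PriorityQueue;

    public class MergeSketch {
      /** Merge already-sorted inputs into one sorted output, the way a minor
       *  compaction merges sorted store files into a single new file. */
      static List<String> merge(List<List<String>> sortedInputs) {
        // Each heap entry is { current value, iterator it came from }.
        PriorityQueue<Object[]> heap =
            new PriorityQueue<>((a, b) -> ((String) a[0]).compareTo((String) b[0]));
        for (List<String> input : sortedInputs) {
          Iterator<String> it = input.iterator();
          if (it.hasNext()) {
            heap.add(new Object[] { it.next(), it });
          }
        }
        List<String> out = new ArrayList<>();
        while (!heap.isEmpty()) {
          Object[] top = heap.poll();
          out.add((String) top[0]);
          @SuppressWarnings("unchecked")
          Iterator<String> it = (Iterator<String>) top[1];
          if (it.hasNext()) {
            heap.add(new Object[] { it.next(), it });
          }
        }
        return out;
      }

      public static void main(String[] args) {
        // Row keys standing in for the keys of the three input HFiles above.
        System.out.println(merge(List.of(
            List.of("row0001", "row0004"),
            List.of("row0002", "row0005"),
            List.of("row0003"))));
        // -> [row0001, row0002, row0003, row0004, row0005]
      }
    }

That the compacted output below (66.2 K) is smaller than the 75.7 K of inputs is likely mostly consolidation of per-file overhead such as block indexes and bloom filters; a real compaction can also drop superseded or deleted cells while it merges.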
2024-11-16T05:50:08,818 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/3b8710c33d894279af2ec02670779446, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/20c940d1210e4eec99540065aadebbdc] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp, totalSize=75.7 K 2024-11-16T05:50:08,818 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18945e0387b64ddb92f1f6d2af522ec4, keycount=35, bloomtype=ROW, size=41.8 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731736206657 2024-11-16T05:50:08,818 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b8710c33d894279af2ec02670779446, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731736208735 2024-11-16T05:50:08,819 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20c940d1210e4eec99540065aadebbdc, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731736208764 2024-11-16T05:50:08,830 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee760dfd2eb02f6e118668b4ae1eee46#info#compaction#61 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:08,830 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/471e5fdb54a44ba7a500d90e91496c13 is 1080, key is row0001/info:/1731736206657/Put/seqid=0 2024-11-16T05:50:08,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741843_1019 (size=67766) 2024-11-16T05:50:08,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741843_1019 (size=67766) 2024-11-16T05:50:08,843 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/471e5fdb54a44ba7a500d90e91496c13 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 2024-11-16T05:50:08,849 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ee760dfd2eb02f6e118668b4ae1eee46/info of ee760dfd2eb02f6e118668b4ae1eee46 into 471e5fdb54a44ba7a500d90e91496c13(size=66.2 K), total size for store is 66.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:08,849 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., storeName=ee760dfd2eb02f6e118668b4ae1eee46/info, priority=13, startTime=1731736208816; duration=0sec 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 because midkey is the same as first or last row 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 because midkey is the same as first or last row 2024-11-16T05:50:08,849 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-11-16T05:50:08,850 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:08,850 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 because midkey is the same as first or last row 2024-11-16T05:50:08,850 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:08,850 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee760dfd2eb02f6e118668b4ae1eee46:info 2024-11-16T05:50:09,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:09,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:09,811 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:50:09,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:09,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:10,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:10,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:10,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:10,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/4562d832e149445c8b7155aa56eac3e5 is 1080, key is row0059/info:/1731736208791/Put/seqid=0 2024-11-16T05:50:10,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741844_1020 (size=12509) 2024-11-16T05:50:10,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741844_1020 (size=12509) 2024-11-16T05:50:10,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/4562d832e149445c8b7155aa56eac3e5 2024-11-16T05:50:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/4562d832e149445c8b7155aa56eac3e5 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/4562d832e149445c8b7155aa56eac3e5 2024-11-16T05:50:10,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/4562d832e149445c8b7155aa56eac3e5, entries=7, sequenceid=86, filesize=12.2 K 2024-11-16T05:50:10,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for ee760dfd2eb02f6e118668b4ae1eee46 in 29ms, sequenceid=86, compaction requested=false 2024-11-16T05:50:10,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:10,837 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-16T05:50:10,837 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:10,837 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 because midkey is the same as first or last row 2024-11-16T05:50:10,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:10,838 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T05:50:10,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/1dabe2502d254b95ba87859a869ab909 is 1080, key is row0066/info:/1731736210809/Put/seqid=0 2024-11-16T05:50:10,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741845_1021 (size=18987) 2024-11-16T05:50:10,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741845_1021 (size=18987) 2024-11-16T05:50:10,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/1dabe2502d254b95ba87859a869ab909 2024-11-16T05:50:10,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/1dabe2502d254b95ba87859a869ab909 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/1dabe2502d254b95ba87859a869ab909 2024-11-16T05:50:10,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/1dabe2502d254b95ba87859a869ab909, entries=13, sequenceid=102, filesize=18.5 K 2024-11-16T05:50:10,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for ee760dfd2eb02f6e118668b4ae1eee46 in 22ms, sequenceid=102, compaction requested=true 2024-11-16T05:50:10,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:10,860 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.9 K, sizeToCheck=16.0 K 2024-11-16T05:50:10,860 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:10,860 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 because midkey is the same as first or last row 2024-11-16T05:50:10,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee760dfd2eb02f6e118668b4ae1eee46:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:10,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:10,860 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:10,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T05:50:10,861 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 99262 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:10,861 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): ee760dfd2eb02f6e118668b4ae1eee46/info is initiating minor compaction (all files) 2024-11-16T05:50:10,862 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ee760dfd2eb02f6e118668b4ae1eee46/info in TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
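Before each of these compactions the short-compactions thread logs how many store files were eligible and how many permutations the exploring algorithm considered. A rough illustration of the per-file ratio test underlying that selection is sketched below: a candidate is "in ratio" when it is no larger than the combined size of the other candidates times hbase.hstore.compaction.ratio (1.2 by default). This is a simplification for illustration only; the real ExploringCompactionPolicy also weighs file counts, total size limits and fallback rules, and the example sizes are made up rather than taken from this run.

import java.util.List;

public final class RatioSketch {
    // Simplified illustration of the "in ratio" test used by HBase's exploring
    // compaction selection; not the actual ExploringCompactionPolicy code.
    static boolean inRatio(List<Long> candidateSizes, int index, double ratio) {
        long others = 0;
        for (int i = 0; i < candidateSizes.size(); i++) {
            if (i != index) {
                others += candidateSizes.get(i);
            }
        }
        return candidateSizes.get(index) <= others * ratio;
    }

    public static void main(String[] args) {
        // Made-up sizes (bytes) of three hypothetical store files.
        List<Long> sizes = List.of(12_000L, 15_000L, 18_000L);
        for (int i = 0; i < sizes.size(); i++) {
            System.out.println("file " + i + " in ratio: " + inRatio(sizes, i, 1.2));
        }
    }
}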
2024-11-16T05:50:10,862 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/4562d832e149445c8b7155aa56eac3e5, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/1dabe2502d254b95ba87859a869ab909] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp, totalSize=96.9 K 2024-11-16T05:50:10,862 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 471e5fdb54a44ba7a500d90e91496c13, keycount=58, bloomtype=ROW, size=66.2 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731736206657 2024-11-16T05:50:10,863 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4562d832e149445c8b7155aa56eac3e5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731736208791 2024-11-16T05:50:10,863 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1dabe2502d254b95ba87859a869ab909, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731736210809 2024-11-16T05:50:10,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/a4cb54d12dc54c8e90be29a4406c1223 is 1080, key is row0079/info:/1731736210839/Put/seqid=0 2024-11-16T05:50:10,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741846_1022 (size=17894) 2024-11-16T05:50:10,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741846_1022 (size=17894) 2024-11-16T05:50:10,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/a4cb54d12dc54c8e90be29a4406c1223 2024-11-16T05:50:10,877 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee760dfd2eb02f6e118668b4ae1eee46#info#compaction#65 average throughput is 26.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:10,877 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/bffa10a0290f42da80072afe62603ff4 is 1080, key is row0001/info:/1731736206657/Put/seqid=0 2024-11-16T05:50:10,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/a4cb54d12dc54c8e90be29a4406c1223 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/a4cb54d12dc54c8e90be29a4406c1223 2024-11-16T05:50:10,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741847_1023 (size=89485) 2024-11-16T05:50:10,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741847_1023 (size=89485) 2024-11-16T05:50:10,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/a4cb54d12dc54c8e90be29a4406c1223, entries=12, sequenceid=117, filesize=17.5 K 2024-11-16T05:50:10,886 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/bffa10a0290f42da80072afe62603ff4 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4 2024-11-16T05:50:10,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=6.30 KB/6456 for ee760dfd2eb02f6e118668b4ae1eee46 in 25ms, sequenceid=117, compaction requested=false 2024-11-16T05:50:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=114.4 K, sizeToCheck=16.0 K 2024-11-16T05:50:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:10,887 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 because midkey is the same as first or last row 2024-11-16T05:50:10,891 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ee760dfd2eb02f6e118668b4ae1eee46/info of ee760dfd2eb02f6e118668b4ae1eee46 into bffa10a0290f42da80072afe62603ff4(size=87.4 K), total size for store is 104.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:10,891 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ee760dfd2eb02f6e118668b4ae1eee46: 2024-11-16T05:50:10,891 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., storeName=ee760dfd2eb02f6e118668b4ae1eee46/info, priority=13, startTime=1731736210860; duration=0sec 2024-11-16T05:50:10,891 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=104.9 K, sizeToCheck=16.0 K 2024-11-16T05:50:10,891 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:10,892 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=104.9 K, sizeToCheck=16.0 K 2024-11-16T05:50:10,892 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:10,892 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=104.9 K, sizeToCheck=16.0 K 2024-11-16T05:50:10,892 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T05:50:10,893 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:10,893 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:10,893 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee760dfd2eb02f6e118668b4ae1eee46:info 2024-11-16T05:50:10,894 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40857 {}] assignment.AssignmentManager(1363): Split request from 3456ee6a3164,46129,1731736195700, parent={ENCODED => ee760dfd2eb02f6e118668b4ae1eee46, NAME => 'TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-16T05:50:10,898 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40857 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:10,902 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40857 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ee760dfd2eb02f6e118668b4ae1eee46, daughterA=6460758d797e29642734b0c09718591d, daughterB=8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:10,903 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure 
table=TestLogRolling-testLogRolling, parent=ee760dfd2eb02f6e118668b4ae1eee46, daughterA=6460758d797e29642734b0c09718591d, daughterB=8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:10,903 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ee760dfd2eb02f6e118668b4ae1eee46, daughterA=6460758d797e29642734b0c09718591d, daughterB=8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:10,903 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ee760dfd2eb02f6e118668b4ae1eee46, daughterA=6460758d797e29642734b0c09718591d, daughterB=8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:10,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, UNASSIGN}] 2024-11-16T05:50:10,910 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, UNASSIGN 2024-11-16T05:50:10,912 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ee760dfd2eb02f6e118668b4ae1eee46, regionState=CLOSING, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:10,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, UNASSIGN because future has completed 2024-11-16T05:50:10,914 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T05:50:10,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure ee760dfd2eb02f6e118668b4ae1eee46, server=3456ee6a3164,46129,1731736195700}] 2024-11-16T05:50:11,075 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,075 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T05:50:11,076 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing ee760dfd2eb02f6e118668b4ae1eee46, disabling compactions & flushes 2024-11-16T05:50:11,076 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
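The split itself begins when the region server asks the master to split the parent at splitKey=row0062 and the master stores SplitTableRegionProcedure (pid=7), which first unassigns and closes the parent region as logged above. For comparison, a client can request the same kind of split explicitly through the public Admin API; the sketch below does that for this table and split key. It is a hedged usage example: the quorum address is a placeholder and the call only submits the request, it does not wait for the daughter regions to come online.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public final class ManualSplitExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum for the test cluster
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the master to split the table at an explicit key, analogous to the
            // splitKey=row0062 request logged by the region server above.
            admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
        }
    }
}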
2024-11-16T05:50:11,076 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:50:11,076 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. after waiting 0 ms 2024-11-16T05:50:11,076 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:50:11,076 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing ee760dfd2eb02f6e118668b4ae1eee46 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-16T05:50:11,085 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/c007e7ce50644532adffb8f46e06efd8 is 1080, key is row0091/info:/1731736210862/Put/seqid=0 2024-11-16T05:50:11,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741848_1024 (size=11424) 2024-11-16T05:50:11,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741848_1024 (size=11424) 2024-11-16T05:50:11,091 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/c007e7ce50644532adffb8f46e06efd8 2024-11-16T05:50:11,097 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/.tmp/info/c007e7ce50644532adffb8f46e06efd8 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c007e7ce50644532adffb8f46e06efd8 2024-11-16T05:50:11,103 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c007e7ce50644532adffb8f46e06efd8, entries=6, sequenceid=127, filesize=11.2 K 2024-11-16T05:50:11,104 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ee760dfd2eb02f6e118668b4ae1eee46 in 27ms, sequenceid=127, compaction requested=true 2024-11-16T05:50:11,105 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/9e8ab5fdacd8467981015790afe2aff8, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/2d36e9519b34493dae68d01e5b5580ea, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/3b8710c33d894279af2ec02670779446, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/20c940d1210e4eec99540065aadebbdc, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/4562d832e149445c8b7155aa56eac3e5, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/1dabe2502d254b95ba87859a869ab909] to archive 2024-11-16T05:50:11,106 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
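When the parent region closes, its already-compacted store files are not deleted in place; HFileArchiver moves them under the cluster's archive directory, as the DEBUG lines that follow show file by file. A small sketch of inspecting that archive with the plain Hadoop FileSystem API is given below; the fs.defaultFS value and the archive path are copied from the paths in this log and would differ on another cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public final class ListArchivedHFiles {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:39651"); // NameNode used by this test run
        Path archive = new Path("/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/"
                + "archive/data/default/TestLogRolling-testLogRolling");
        try (FileSystem fs = FileSystem.get(conf)) {
            // Recursively list everything HFileArchiver moved under the table's archive dir.
            RemoteIterator<LocatedFileStatus> it = fs.listFiles(archive, true);
            while (it.hasNext()) {
                LocatedFileStatus status = it.next();
                System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
            }
        }
    }
}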
2024-11-16T05:50:11,107 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/9e8ab5fdacd8467981015790afe2aff8 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/9e8ab5fdacd8467981015790afe2aff8 2024-11-16T05:50:11,108 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c847fb20aa7f4937a1738d84085bf76b 2024-11-16T05:50:11,110 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/18945e0387b64ddb92f1f6d2af522ec4 2024-11-16T05:50:11,111 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/2d36e9519b34493dae68d01e5b5580ea to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/2d36e9519b34493dae68d01e5b5580ea 2024-11-16T05:50:11,112 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/3b8710c33d894279af2ec02670779446 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/3b8710c33d894279af2ec02670779446 2024-11-16T05:50:11,113 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 to 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/471e5fdb54a44ba7a500d90e91496c13 2024-11-16T05:50:11,114 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/20c940d1210e4eec99540065aadebbdc to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/20c940d1210e4eec99540065aadebbdc 2024-11-16T05:50:11,114 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/4562d832e149445c8b7155aa56eac3e5 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/4562d832e149445c8b7155aa56eac3e5 2024-11-16T05:50:11,115 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/1dabe2502d254b95ba87859a869ab909 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/1dabe2502d254b95ba87859a869ab909 2024-11-16T05:50:11,121 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-11-16T05:50:11,122 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 2024-11-16T05:50:11,122 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for ee760dfd2eb02f6e118668b4ae1eee46: Waiting for close lock at 1731736211076Running coprocessor pre-close hooks at 1731736211076Disabling compacts and flushes for region at 1731736211076Disabling writes for close at 1731736211076Obtaining lock to block concurrent updates at 1731736211076Preparing flush snapshotting stores in ee760dfd2eb02f6e118668b4ae1eee46 at 1731736211076Finished memstore snapshotting TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., syncing WAL and waiting on mvcc, flushsize=dataSize=6456, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1731736211077 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
at 1731736211078 (+1 ms)Flushing ee760dfd2eb02f6e118668b4ae1eee46/info: creating writer at 1731736211078Flushing ee760dfd2eb02f6e118668b4ae1eee46/info: appending metadata at 1731736211085 (+7 ms)Flushing ee760dfd2eb02f6e118668b4ae1eee46/info: closing flushed file at 1731736211085Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1553c2fe: reopening flushed file at 1731736211096 (+11 ms)Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ee760dfd2eb02f6e118668b4ae1eee46 in 27ms, sequenceid=127, compaction requested=true at 1731736211104 (+8 ms)Writing region close event to WAL at 1731736211118 (+14 ms)Running coprocessor post-close hooks at 1731736211122 (+4 ms)Closed at 1731736211122 2024-11-16T05:50:11,124 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,125 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ee760dfd2eb02f6e118668b4ae1eee46, regionState=CLOSED 2024-11-16T05:50:11,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure ee760dfd2eb02f6e118668b4ae1eee46, server=3456ee6a3164,46129,1731736195700 because future has completed 2024-11-16T05:50:11,130 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-16T05:50:11,130 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure ee760dfd2eb02f6e118668b4ae1eee46, server=3456ee6a3164,46129,1731736195700 in 214 msec 2024-11-16T05:50:11,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T05:50:11,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ee760dfd2eb02f6e118668b4ae1eee46, UNASSIGN in 221 msec 2024-11-16T05:50:11,140 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:11,143 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=ee760dfd2eb02f6e118668b4ae1eee46, threads=3 2024-11-16T05:50:11,145 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/a4cb54d12dc54c8e90be29a4406c1223 for region: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,145 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c007e7ce50644532adffb8f46e06efd8 for region: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,145 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4 for region: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,158 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c007e7ce50644532adffb8f46e06efd8, top=true 2024-11-16T05:50:11,159 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/a4cb54d12dc54c8e90be29a4406c1223, top=true 2024-11-16T05:50:11,165 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223 for child: 8df67524193647b4367c9d2a0215dfcd, parent: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,165 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/a4cb54d12dc54c8e90be29a4406c1223 for region: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741849_1025 (size=27) 2024-11-16T05:50:11,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741849_1025 (size=27) 2024-11-16T05:50:11,168 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8 for child: 8df67524193647b4367c9d2a0215dfcd, parent: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,168 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/c007e7ce50644532adffb8f46e06efd8 for region: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741850_1026 (size=27) 2024-11-16T05:50:11,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741850_1026 (size=27) 2024-11-16T05:50:11,178 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4 for region: ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:11,180 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region ee760dfd2eb02f6e118668b4ae1eee46 Daughter A: [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46] storefiles, Daughter B: [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8] storefiles. 2024-11-16T05:50:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741851_1027 (size=71) 2024-11-16T05:50:11,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741851_1027 (size=71) 2024-11-16T05:50:11,191 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:11,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741852_1028 (size=71) 2024-11-16T05:50:11,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741852_1028 (size=71) 2024-11-16T05:50:11,210 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:11,221 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-16T05:50:11,226 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-16T05:50:11,228 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731736211228"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731736211228"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731736211228"}]},"ts":"1731736211228"} 2024-11-16T05:50:11,228 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731736211228"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731736211228"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731736211228"}]},"ts":"1731736211228"} 2024-11-16T05:50:11,228 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731736211228"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731736211228"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731736211228"}]},"ts":"1731736211228"} 2024-11-16T05:50:11,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6460758d797e29642734b0c09718591d, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8df67524193647b4367c9d2a0215dfcd, ASSIGN}] 2024-11-16T05:50:11,250 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6460758d797e29642734b0c09718591d, ASSIGN 2024-11-16T05:50:11,250 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8df67524193647b4367c9d2a0215dfcd, ASSIGN 2024-11-16T05:50:11,251 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8df67524193647b4367c9d2a0215dfcd, ASSIGN; state=SPLITTING_NEW, location=3456ee6a3164,46129,1731736195700; forceNewPlan=false, retain=false 2024-11-16T05:50:11,251 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6460758d797e29642734b0c09718591d, ASSIGN; state=SPLITTING_NEW, location=3456ee6a3164,46129,1731736195700; forceNewPlan=false, retain=false 2024-11-16T05:50:11,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:11,402 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=8df67524193647b4367c9d2a0215dfcd, regionState=OPENING, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:11,402 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6460758d797e29642734b0c09718591d, regionState=OPENING, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:11,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8df67524193647b4367c9d2a0215dfcd, ASSIGN because future has completed 2024-11-16T05:50:11,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700}] 2024-11-16T05:50:11,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6460758d797e29642734b0c09718591d, ASSIGN because future has completed 2024-11-16T05:50:11,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6460758d797e29642734b0c09718591d, server=3456ee6a3164,46129,1731736195700}] 2024-11-16T05:50:11,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:11,561 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:11,561 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 8df67524193647b4367c9d2a0215dfcd, NAME => 'TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-16T05:50:11,562 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,562 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:50:11,562 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,562 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,563 INFO [StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,564 INFO [StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8df67524193647b4367c9d2a0215dfcd columnFamilyName info 2024-11-16T05:50:11,564 DEBUG [StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:11,573 DEBUG 
[StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223 2024-11-16T05:50:11,576 DEBUG [StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8 2024-11-16T05:50:11,583 DEBUG [StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46->hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4-top 2024-11-16T05:50:11,584 INFO [StoreOpener-8df67524193647b4367c9d2a0215dfcd-1 {}] regionserver.HStore(327): Store=8df67524193647b4367c9d2a0215dfcd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:50:11,584 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,585 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,586 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,587 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,587 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,589 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,590 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 8df67524193647b4367c9d2a0215dfcd; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715471, jitterRate=-0.09023238718509674}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:50:11,590 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 
{event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:11,590 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 8df67524193647b4367c9d2a0215dfcd: Running coprocessor pre-open hook at 1731736211562Writing region info on filesystem at 1731736211562Initializing all the Stores at 1731736211563 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736211563Cleaning up temporary data from old regions at 1731736211587 (+24 ms)Running coprocessor post-open hooks at 1731736211590 (+3 ms)Region opened successfully at 1731736211590 2024-11-16T05:50:11,591 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., pid=12, masterSystemTime=1731736211557 2024-11-16T05:50:11,591 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:11,591 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:11,591 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:11,593 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:11,593 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:11,593 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 
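The entries just above record CompactSplit scheduling a minor compaction of all three files the new daughter region 8df67524193647b4367c9d2a0215dfcd inherited from the split; the file list and the compaction's completion appear in the entries that follow. As a minimal sketch only, assuming an HBase 2.x-compatible client on the classpath and a cluster reachable through the default HBaseConfiguration (none of this code is part of the captured test run, and the class name RequestInfoCompaction is made up for illustration), an equivalent compaction of the 'info' family could be requested through the Admin API:

    // Made-up example class: requests a compaction of the 'info' family of the table seen
    // in the log. In the run above the equivalent request was issued automatically by
    // CompactSplit when the daughter region opened, so this only mirrors that behaviour.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestInfoCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asynchronous request: the hosting region server selects the eligible files and
          // runs the compaction on its shortCompactions/longCompactions pools, as logged.
          admin.compact(table, Bytes.toBytes("info"));
        }
      }
    }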
2024-11-16T05:50:11,594 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46->hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4-top, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=116.0 K 2024-11-16T05:50:11,594 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:11,594 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:11,594 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46, keycount=39, bloomtype=ROW, size=87.4 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731736206657 2024-11-16T05:50:11,595 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 
2024-11-16T05:50:11,595 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 6460758d797e29642734b0c09718591d, NAME => 'TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-16T05:50:11,595 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=8df67524193647b4367c9d2a0215dfcd, regionState=OPEN, openSeqNum=131, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:11,595 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,597 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:50:11,597 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,597 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731736210839 2024-11-16T05:50:11,597 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,597 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-16T05:50:11,597 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
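The last two entries above show a flush being requested on region 1588230740 (the hbase:meta region) and FlushAllLargeStoresPolicy deciding to flush every column family; the flush itself is carried out in the entries below. As a minimal sketch, assuming the same hypothetical client setup as the earlier example (this code is not part of the captured test, and the class name FlushMetaExample is invented), the same effect can be requested explicitly:

    // Made-up example class: explicitly flushes hbase:meta, the region logged as 1588230740.
    // In the run above the flush was triggered by the region server's MemStoreFlusher, so
    // this is illustrative rather than a reproduction of the test's code path.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMetaExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.META_TABLE_NAME); // flushes all column families of hbase:meta
        }
      }
    }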
2024-11-16T05:50:11,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-16T05:50:11,598 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731736210862 2024-11-16T05:50:11,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 because future has completed 2024-11-16T05:50:11,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T05:50:11,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 in 194 msec 2024-11-16T05:50:11,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8df67524193647b4367c9d2a0215dfcd, ASSIGN in 357 msec 2024-11-16T05:50:11,609 INFO [StoreOpener-6460758d797e29642734b0c09718591d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,614 INFO [StoreOpener-6460758d797e29642734b0c09718591d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6460758d797e29642734b0c09718591d columnFamilyName info 2024-11-16T05:50:11,614 DEBUG [StoreOpener-6460758d797e29642734b0c09718591d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:11,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/info/e7293ae594d540fa9f0ce44776e7e99b is 193, key is TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd./info:regioninfo/1731736211595/Put/seqid=0 2024-11-16T05:50:11,634 DEBUG [StoreOpener-6460758d797e29642734b0c09718591d-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46->hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4-bottom 2024-11-16T05:50:11,635 INFO [StoreOpener-6460758d797e29642734b0c09718591d-1 {}] regionserver.HStore(327): Store=6460758d797e29642734b0c09718591d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:50:11,635 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,636 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,637 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,637 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,637 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,639 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,639 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 6460758d797e29642734b0c09718591d; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828376, jitterRate=0.053335607051849365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T05:50:11,640 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:11,640 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 6460758d797e29642734b0c09718591d: Running coprocessor pre-open hook at 1731736211597Writing region info on filesystem at 1731736211597Initializing all the Stores at 1731736211599 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 
1731736211599Cleaning up temporary data from old regions at 1731736211637 (+38 ms)Running coprocessor post-open hooks at 1731736211640 (+3 ms)Region opened successfully at 1731736211640 2024-11-16T05:50:11,640 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d., pid=13, masterSystemTime=1731736211557 2024-11-16T05:50:11,641 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 6460758d797e29642734b0c09718591d:info, priority=-2147483648, current under compaction store size is 2 2024-11-16T05:50:11,641 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:11,641 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T05:50:11,641 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:11,641 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1541): 6460758d797e29642734b0c09718591d/info is initiating minor compaction (all files) 2024-11-16T05:50:11,642 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6460758d797e29642734b0c09718591d/info in TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:11,642 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46->hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4-bottom] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/.tmp, totalSize=87.4 K 2024-11-16T05:50:11,643 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46, keycount=39, bloomtype=ROW, size=87.4 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731736206657 2024-11-16T05:50:11,643 DEBUG [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:11,644 INFO [RS_OPEN_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 
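At this point both daughters of SplitTableRegionProcedure pid=7 have opened: 8df67524193647b4367c9d2a0215dfcd covers [row0062, '') and 6460758d797e29642734b0c09718591d covers ('', row0062), replacing parent ee760dfd2eb02f6e118668b4ae1eee46; the procedure is marked SUCCESS in the entries below. As a minimal sketch, assuming an HBase 2.x-compatible client and default configuration (this code is not taken from the test; the class name SplitAndListRegions is made up, and the fixed sleep is a crude stand-in for proper polling of the region states), a comparable split and inspection could be driven from the Admin API:

    // Made-up example class: requests a split of the table at the key 'row0062' seen in the
    // log, then prints the resulting region boundaries. The master runs the actual split as
    // a SplitTableRegionProcedure, just as recorded above for pid=7.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitAndListRegions {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.split(table, Bytes.toBytes("row0062")); // asynchronous split request
          Thread.sleep(5_000);                          // crude wait for the daughters to come online
          for (RegionInfo region : admin.getRegions(table)) {
            System.out.println(region.getRegionNameAsString()
                + " start=" + Bytes.toStringBinary(region.getStartKey())
                + " end=" + Bytes.toStringBinary(region.getEndKey()));
          }
        }
      }
    }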
2024-11-16T05:50:11,645 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6460758d797e29642734b0c09718591d, regionState=OPEN, openSeqNum=131, regionLocation=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:11,647 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#68 average throughput is 11.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:11,647 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/6e480ed4bd9444f886a3645c8defe2ad is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:11,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6460758d797e29642734b0c09718591d, server=3456ee6a3164,46129,1731736195700 because future has completed 2024-11-16T05:50:11,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-16T05:50:11,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 6460758d797e29642734b0c09718591d, server=3456ee6a3164,46129,1731736195700 in 257 msec 2024-11-16T05:50:11,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-16T05:50:11,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6460758d797e29642734b0c09718591d, ASSIGN in 419 msec 2024-11-16T05:50:11,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ee760dfd2eb02f6e118668b4ae1eee46, daughterA=6460758d797e29642734b0c09718591d, daughterB=8df67524193647b4367c9d2a0215dfcd in 772 msec 2024-11-16T05:50:11,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741853_1029 (size=9882) 2024-11-16T05:50:11,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741853_1029 (size=9882) 2024-11-16T05:50:11,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/info/e7293ae594d540fa9f0ce44776e7e99b 2024-11-16T05:50:11,702 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6460758d797e29642734b0c09718591d#info#compaction#69 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:11,703 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/.tmp/info/f3a5774480464eb294e5b5734604e81b is 1080, key is row0001/info:/1731736206657/Put/seqid=0 2024-11-16T05:50:11,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741854_1030 (size=42984) 2024-11-16T05:50:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741854_1030 (size=42984) 2024-11-16T05:50:11,722 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/6e480ed4bd9444f886a3645c8defe2ad as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6e480ed4bd9444f886a3645c8defe2ad 2024-11-16T05:50:11,733 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into 6e480ed4bd9444f886a3645c8defe2ad(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:11,734 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:11,734 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736211591; duration=0sec 2024-11-16T05:50:11,734 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:11,734 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:11,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741855_1031 (size=70862) 2024-11-16T05:50:11,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741855_1031 (size=70862) 2024-11-16T05:50:11,745 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/.tmp/info/f3a5774480464eb294e5b5734604e81b as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/f3a5774480464eb294e5b5734604e81b 2024-11-16T05:50:11,747 DEBUG [MemStoreFlusher.0 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/ns/58428a6eca274e4e87ed6d972ab1c16c is 43, key is default/ns:d/1731736196507/Put/seqid=0 2024-11-16T05:50:11,759 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6460758d797e29642734b0c09718591d/info of 6460758d797e29642734b0c09718591d into f3a5774480464eb294e5b5734604e81b(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:11,759 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6460758d797e29642734b0c09718591d: 2024-11-16T05:50:11,759 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d., storeName=6460758d797e29642734b0c09718591d/info, priority=15, startTime=1731736211640; duration=0sec 2024-11-16T05:50:11,759 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:11,759 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6460758d797e29642734b0c09718591d:info 2024-11-16T05:50:11,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741856_1032 (size=5153) 2024-11-16T05:50:11,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741856_1032 (size=5153) 2024-11-16T05:50:11,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/ns/58428a6eca274e4e87ed6d972ab1c16c 2024-11-16T05:50:11,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/table/761acd0c9adf4e37bc1b42ed2fb29fc6 is 65, key is TestLogRolling-testLogRolling/table:state/1731736197019/Put/seqid=0 2024-11-16T05:50:11,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741857_1033 (size=5340) 2024-11-16T05:50:11,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741857_1033 (size=5340) 2024-11-16T05:50:11,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/table/761acd0c9adf4e37bc1b42ed2fb29fc6 2024-11-16T05:50:11,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/info/e7293ae594d540fa9f0ce44776e7e99b as 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/info/e7293ae594d540fa9f0ce44776e7e99b 2024-11-16T05:50:11,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/info/e7293ae594d540fa9f0ce44776e7e99b, entries=30, sequenceid=17, filesize=9.7 K 2024-11-16T05:50:11,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/ns/58428a6eca274e4e87ed6d972ab1c16c as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/ns/58428a6eca274e4e87ed6d972ab1c16c 2024-11-16T05:50:11,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/ns/58428a6eca274e4e87ed6d972ab1c16c, entries=2, sequenceid=17, filesize=5.0 K 2024-11-16T05:50:11,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/table/761acd0c9adf4e37bc1b42ed2fb29fc6 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/table/761acd0c9adf4e37bc1b42ed2fb29fc6 2024-11-16T05:50:11,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/table/761acd0c9adf4e37bc1b42ed2fb29fc6, entries=2, sequenceid=17, filesize=5.2 K 2024-11-16T05:50:11,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 235ms, sequenceid=17, compaction requested=false 2024-11-16T05:50:11,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T05:50:12,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:12,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57954 deadline: 1731736222874, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. is not online on 3456ee6a3164,46129,1731736195700 2024-11-16T05:50:12,881 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., hostname=3456ee6a3164,46129,1731736195700, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., hostname=3456ee6a3164,46129,1731736195700, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. is not online on 3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T05:50:12,881 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., hostname=3456ee6a3164,46129,1731736195700, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46. 
is not online on 3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T05:50:12,881 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731736196634.ee760dfd2eb02f6e118668b4ae1eee46., hostname=3456ee6a3164,46129,1731736195700, seqNum=2 from cache 2024-11-16T05:50:13,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:13,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:14,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
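For context on the NotServingRegionException entries at 05:50:12,878-12,881 above: the async locator reacts by updating and then evicting the cached location of region ee760dfd2eb02f6e118668b4ae1eee46 so the next attempt re-resolves it from meta. The minimal sketch below illustrates the same idea from application code under stated assumptions (table and row name taken from the log, the info:q cell is invented); it is not the AsyncRegionLocatorHelper implementation, and the stock synchronous client normally performs these retries internally before surfacing an error.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnNotServingRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table);
         RegionLocator locator = conn.getRegionLocator(table)) {
      Put put = new Put(Bytes.toBytes("row0097"))                                     // row from the log
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));  // invented cell for illustration
      try {
        t.put(put);
      } catch (NotServingRegionException e) {
        // Mirror what the client-side locator does after the error above:
        // drop the stale cached location, look the region up again
        // (reload = true forces a meta lookup), then retry once.
        locator.getRegionLocation(put.getRow(), true);
        t.put(put);
      }
    }
  }
}

The reload=true lookup is the key step: it bypasses the client-side location cache, which is exactly the cache entry the log shows being removed at 05:50:12,881.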
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:14,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:15,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:15,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:16,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:16,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:16,624 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T05:50:16,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
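The recurring "Failed invocation ... Filesystem closed" warnings above come from RecoverLeaseFSUtils polling the NameNode (via reflection) to ask whether the old WAL file has been closed; because the underlying DFSClient has already been shut down, every roughly one-second probe fails the same way. The sketch below shows the general recover-then-poll pattern against the public DistributedFileSystem API; the HDFS URI, WAL path, timeout, and sleep interval are placeholders, and this is an illustration of the pattern rather than the RecoverLeaseFSUtils code itself.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Ask the NameNode to recover the lease, then poll isFileClosed() until it
  // reports true or the deadline passes.
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    if (dfs.recoverLease(wal)) {   // true means the lease is already released
      return true;
    }
    while (System.currentTimeMillis() < deadline) {
      if (dfs.isFileClosed(wal)) { // the probe the log shows failing once the client is closed
        return true;
      }
      Thread.sleep(1000L);         // the log retries at roughly one-second intervals
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder values: point these at a live cluster and a real WAL file.
    URI cluster = URI.create("hdfs://localhost:36821");
    Path wal = new Path("/user/jenkins/wals/example-wal");
    try (FileSystem fs = FileSystem.get(cluster, conf)) {
      System.out.println("lease recovered: "
          + waitForLeaseRecovery((DistributedFileSystem) fs, wal, 60_000L));
    }
  }
}

In the run above the poll can never succeed, because the FileSystem instance backing the probe was closed during shutdown, so the retry loop just keeps logging the same InvocationTargetException wrapped around "Filesystem closed".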
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:16,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T05:50:17,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:17,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:18,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:18,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:19,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:19,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:20,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:20,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:21,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:21,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:22,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:22,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:22,921 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131] 2024-11-16T05:50:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:22,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:22,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3b5d5a58f3054f4e84db09c414a97ed5 is 1080, key is row0097/info:/1731736222922/Put/seqid=0 2024-11-16T05:50:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741858_1034 (size=12516) 2024-11-16T05:50:22,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3b5d5a58f3054f4e84db09c414a97ed5 2024-11-16T05:50:22,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741858_1034 (size=12516) 2024-11-16T05:50:22,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3b5d5a58f3054f4e84db09c414a97ed5 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3b5d5a58f3054f4e84db09c414a97ed5 2024-11-16T05:50:22,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3b5d5a58f3054f4e84db09c414a97ed5, entries=7, sequenceid=141, filesize=12.2 K 2024-11-16T05:50:22,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 8df67524193647b4367c9d2a0215dfcd in 23ms, sequenceid=141, compaction requested=false 2024-11-16T05:50:22,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:22,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:22,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T05:50:22,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/bdf0e6c1766445d0935e116cd59b2087 is 1080, key is row0104/info:/1731736222934/Put/seqid=0 2024-11-16T05:50:22,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741859_1035 (size=19000) 2024-11-16T05:50:22,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741859_1035 (size=19000) 2024-11-16T05:50:22,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/bdf0e6c1766445d0935e116cd59b2087 2024-11-16T05:50:22,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/bdf0e6c1766445d0935e116cd59b2087 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bdf0e6c1766445d0935e116cd59b2087 2024-11-16T05:50:22,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bdf0e6c1766445d0935e116cd59b2087, entries=13, sequenceid=157, filesize=18.6 K 2024-11-16T05:50:22,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 8df67524193647b4367c9d2a0215dfcd in 22ms, sequenceid=157, compaction requested=true 2024-11-16T05:50:22,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:22,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:22,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:22,979 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:22,980 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:22,980 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:22,981 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in 
TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:22,981 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6e480ed4bd9444f886a3645c8defe2ad, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3b5d5a58f3054f4e84db09c414a97ed5, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bdf0e6c1766445d0935e116cd59b2087] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=72.8 K 2024-11-16T05:50:22,981 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting 6e480ed4bd9444f886a3645c8defe2ad, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731736208798 2024-11-16T05:50:22,981 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting 3b5d5a58f3054f4e84db09c414a97ed5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731736222922 2024-11-16T05:50:22,982 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting bdf0e6c1766445d0935e116cd59b2087, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731736222934 2024-11-16T05:50:22,991 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#74 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
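The "Exploring compaction algorithm has selected 3 files of size 74500" line reflects the ratio-based candidate check used for minor compaction selection. Below is a deliberately simplified, self-contained illustration of that check, not the actual ExploringCompactionPolicy code: a candidate set passes when no single file is larger than the sum of the other files multiplied by the compaction ratio. The file sizes are taken from the three store files listed above; the ratios shown (1.2 peak, 5.0 off-peak) are commonly cited defaults and are an assumption here, since the log does not say which ratio was in effect for this run.

import java.util.List;

public class RatioCheckSketch {
  // A set of store files is "in ratio" when every file is no larger than the
  // sum of the other files times the compaction ratio.
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes (bytes) of the three candidates from the log: ~42.0 K, 12.2 K and
    // 18.6 K, totalling the 74500 bytes reported by the compaction policy.
    List<Long> candidates = List.of(42984L, 12516L, 19000L);
    System.out.println("in ratio at 1.2: " + filesInRatio(candidates, 1.2)); // assumed peak default
    System.out.println("in ratio at 5.0: " + filesInRatio(candidates, 5.0)); // assumed off-peak default
  }
}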
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:22,992 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aaa2f7ee808440c9872b2f7557597c28 is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:22,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741860_1036 (size=64714) 2024-11-16T05:50:22,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741860_1036 (size=64714) 2024-11-16T05:50:23,002 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aaa2f7ee808440c9872b2f7557597c28 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aaa2f7ee808440c9872b2f7557597c28 2024-11-16T05:50:23,007 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into aaa2f7ee808440c9872b2f7557597c28(size=63.2 K), total size for store is 63.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:23,007 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:23,008 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736222979; duration=0sec 2024-11-16T05:50:23,008 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:23,008 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:23,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
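The two flushes recorded at 05:50:22,933 and 05:50:22,957 above were triggered automatically by the MemStoreFlusher once the memstore for 8df67524193647b4367c9d2a0215dfcd filled; each flush writes a temporary HFile under .tmp and then commits it into the info store, and the second one ends with compaction requested=true, which leads into the compaction just shown. The same flush path can also be exercised explicitly through the public Admin API, as in the hedged sketch below (cluster settings are assumed to come from the classpath configuration; this is an illustration, not part of the test).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush every region of the table; on the server this runs the same
      // write-to-.tmp-then-commit sequence that the HRegionFileSystem
      // "Committing ..." lines above record.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
    }
  }
}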
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:23,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:24,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:24,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T05:50:24,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd
2024-11-16T05:50:24,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-16T05:50:24,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/49c162b5e6c347b4a9fd5fddaee9dc22 is 1080, key is row0117/info:/1731736222958/Put/seqid=0
2024-11-16T05:50:25,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741861_1037 (size=19000)
2024-11-16T05:50:25,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741861_1037 (size=19000)
2024-11-16T05:50:25,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/49c162b5e6c347b4a9fd5fddaee9dc22
2024-11-16T05:50:25,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-16T05:50:25,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57954 deadline: 1731736235032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700
2024-11-16T05:50:25,034 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T05:50:25,034 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.,
hostname=3456ee6a3164,46129,1731736195700, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T05:50:25,034 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131 because the exception is null or not the one we care about 2024-11-16T05:50:25,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/49c162b5e6c347b4a9fd5fddaee9dc22 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/49c162b5e6c347b4a9fd5fddaee9dc22 2024-11-16T05:50:25,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/49c162b5e6c347b4a9fd5fddaee9dc22, entries=13, sequenceid=174, filesize=18.6 K 2024-11-16T05:50:25,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 8df67524193647b4367c9d2a0215dfcd in 63ms, sequenceid=174, compaction requested=false 2024-11-16T05:50:25,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:25,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:25,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:25,634 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T05:50:26,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:26,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:27,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:27,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:28,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:28,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:29,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:29,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:30,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:30,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:31,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:31,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:32,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:32,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:33,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:33,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:34,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:34,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:35,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-16T05:50:35,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/d3aafbecbac642b0b7a9b82efbb50ebc is 1080, key is row0130/info:/1731736224983/Put/seqid=0 2024-11-16T05:50:35,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741862_1038 (size=23316) 2024-11-16T05:50:35,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741862_1038 (size=23316) 2024-11-16T05:50:35,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/d3aafbecbac642b0b7a9b82efbb50ebc 2024-11-16T05:50:35,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/d3aafbecbac642b0b7a9b82efbb50ebc as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/d3aafbecbac642b0b7a9b82efbb50ebc 2024-11-16T05:50:35,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/d3aafbecbac642b0b7a9b82efbb50ebc, entries=17, sequenceid=194, filesize=22.8 K 2024-11-16T05:50:35,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=11.56 KB/11836 for 8df67524193647b4367c9d2a0215dfcd in 27ms, sequenceid=194, compaction requested=true 2024-11-16T05:50:35,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:35,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:35,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:35,077 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:35,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:35,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T05:50:35,079 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 107030 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:35,079 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:35,079 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:35,079 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aaa2f7ee808440c9872b2f7557597c28, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/49c162b5e6c347b4a9fd5fddaee9dc22, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/d3aafbecbac642b0b7a9b82efbb50ebc] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=104.5 K 2024-11-16T05:50:35,079 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting aaa2f7ee808440c9872b2f7557597c28, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731736208798 2024-11-16T05:50:35,080 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 49c162b5e6c347b4a9fd5fddaee9dc22, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731736222958 2024-11-16T05:50:35,081 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3aafbecbac642b0b7a9b82efbb50ebc, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731736224983 2024-11-16T05:50:35,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aa29879a2723459fb5651e7b77650522 is 1080, key is row0147/info:/1731736235052/Put/seqid=0 2024-11-16T05:50:35,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is 
added to blk_1073741863_1039 (size=17906) 2024-11-16T05:50:35,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741863_1039 (size=17906) 2024-11-16T05:50:35,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aa29879a2723459fb5651e7b77650522 2024-11-16T05:50:35,094 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#78 average throughput is 43.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:35,095 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3c8bc635c1cb49d584797a4a0fb32ea1 is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:35,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aa29879a2723459fb5651e7b77650522 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa29879a2723459fb5651e7b77650522 2024-11-16T05:50:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741864_1040 (size=97233) 2024-11-16T05:50:35,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741864_1040 (size=97233) 2024-11-16T05:50:35,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa29879a2723459fb5651e7b77650522, entries=12, sequenceid=209, filesize=17.5 K 2024-11-16T05:50:35,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for 8df67524193647b4367c9d2a0215dfcd in 29ms, sequenceid=209, compaction requested=false 2024-11-16T05:50:35,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:35,108 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3c8bc635c1cb49d584797a4a0fb32ea1 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c8bc635c1cb49d584797a4a0fb32ea1 2024-11-16T05:50:35,114 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed 
compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into 3c8bc635c1cb49d584797a4a0fb32ea1(size=95.0 K), total size for store is 112.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:35,114 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:35,114 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736235077; duration=0sec 2024-11-16T05:50:35,115 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:35,115 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:35,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:35,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:36,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:36,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:37,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:37,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:37,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/a76c2034a75d4b609dcb7604f9198fe4 is 1080, key is row0159/info:/1731736235079/Put/seqid=0 2024-11-16T05:50:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741865_1041 (size=12516) 2024-11-16T05:50:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741865_1041 (size=12516) 2024-11-16T05:50:37,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/a76c2034a75d4b609dcb7604f9198fe4 2024-11-16T05:50:37,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/a76c2034a75d4b609dcb7604f9198fe4 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a76c2034a75d4b609dcb7604f9198fe4 2024-11-16T05:50:37,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a76c2034a75d4b609dcb7604f9198fe4, entries=7, sequenceid=220, filesize=12.2 K 2024-11-16T05:50:37,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 8df67524193647b4367c9d2a0215dfcd in 22ms, sequenceid=220, compaction requested=true 2024-11-16T05:50:37,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:37,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:37,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:37,123 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:37,124 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127655 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-16T05:50:37,124 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:37,124 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:37,124 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c8bc635c1cb49d584797a4a0fb32ea1, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa29879a2723459fb5651e7b77650522, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a76c2034a75d4b609dcb7604f9198fe4] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=124.7 K 2024-11-16T05:50:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:37,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T05:50:37,125 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c8bc635c1cb49d584797a4a0fb32ea1, keycount=85, bloomtype=ROW, size=95.0 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731736208798 2024-11-16T05:50:37,125 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa29879a2723459fb5651e7b77650522, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731736235052 2024-11-16T05:50:37,126 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting a76c2034a75d4b609dcb7604f9198fe4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1731736235079 2024-11-16T05:50:37,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/ef3a0b1089a8414fa0a217ccc40a2c4a is 1080, key is row0166/info:/1731736237101/Put/seqid=0 2024-11-16T05:50:37,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741866_1042 (size=17906) 2024-11-16T05:50:37,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741866_1042 (size=17906) 2024-11-16T05:50:37,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=235 (bloomFilter=true), 
to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/ef3a0b1089a8414fa0a217ccc40a2c4a 2024-11-16T05:50:37,138 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#81 average throughput is 35.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:37,139 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/c547a8bff3184415a86bb90ba0d03aca is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:37,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/ef3a0b1089a8414fa0a217ccc40a2c4a as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/ef3a0b1089a8414fa0a217ccc40a2c4a 2024-11-16T05:50:37,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/ef3a0b1089a8414fa0a217ccc40a2c4a, entries=12, sequenceid=235, filesize=17.5 K 2024-11-16T05:50:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741867_1043 (size=117821) 2024-11-16T05:50:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741867_1043 (size=117821) 2024-11-16T05:50:37,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 8df67524193647b4367c9d2a0215dfcd in 22ms, sequenceid=235, compaction requested=false 2024-11-16T05:50:37,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:37,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T05:50:37,153 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/c547a8bff3184415a86bb90ba0d03aca as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c547a8bff3184415a86bb90ba0d03aca 2024-11-16T05:50:37,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aa4d71c895a949b48768220a43fcc049 is 1080, key is row0178/info:/1731736237126/Put/seqid=0 2024-11-16T05:50:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741868_1044 (size=17906) 2024-11-16T05:50:37,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741868_1044 (size=17906) 2024-11-16T05:50:37,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aa4d71c895a949b48768220a43fcc049 2024-11-16T05:50:37,159 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into c547a8bff3184415a86bb90ba0d03aca(size=115.1 K), total size for store is 132.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:37,160 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:37,160 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736237123; duration=0sec 2024-11-16T05:50:37,160 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:37,160 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:37,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/aa4d71c895a949b48768220a43fcc049 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa4d71c895a949b48768220a43fcc049 2024-11-16T05:50:37,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa4d71c895a949b48768220a43fcc049, entries=12, sequenceid=250, filesize=17.5 K 2024-11-16T05:50:37,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 8df67524193647b4367c9d2a0215dfcd in 20ms, sequenceid=250, compaction requested=true 2024-11-16T05:50:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:37,169 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:37,169 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:37,170 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 153633 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:37,170 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:37,171 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:37,171 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c547a8bff3184415a86bb90ba0d03aca, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/ef3a0b1089a8414fa0a217ccc40a2c4a, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa4d71c895a949b48768220a43fcc049] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=150.0 K 2024-11-16T05:50:37,171 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting c547a8bff3184415a86bb90ba0d03aca, keycount=104, bloomtype=ROW, size=115.1 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1731736208798 2024-11-16T05:50:37,171 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting ef3a0b1089a8414fa0a217ccc40a2c4a, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731736237101 2024-11-16T05:50:37,172 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting aa4d71c895a949b48768220a43fcc049, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731736237126 2024-11-16T05:50:37,182 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#83 average throughput is 65.67 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:37,182 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3c69384f6f464474853e15b717794a2f is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:37,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741869_1045 (size=144000) 2024-11-16T05:50:37,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741869_1045 (size=144000) 2024-11-16T05:50:37,190 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/3c69384f6f464474853e15b717794a2f as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c69384f6f464474853e15b717794a2f 2024-11-16T05:50:37,196 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into 3c69384f6f464474853e15b717794a2f(size=140.6 K), total size for store is 140.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:37,196 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:37,196 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736237169; duration=0sec 2024-11-16T05:50:37,196 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:37,196 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:37,274 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T05:50:37,274 INFO [master/3456ee6a3164:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T05:50:37,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:37,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:38,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:38,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:39,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:39,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/a029ecceb00a418e833b47896873fd29 is 1080, key is row0190/info:/1731736237151/Put/seqid=0 2024-11-16T05:50:39,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741870_1046 (size=12520) 2024-11-16T05:50:39,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741870_1046 (size=12520) 2024-11-16T05:50:39,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-16T05:50:39,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57954 deadline: 1731736249206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 2024-11-16T05:50:39,207 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T05:50:39,207 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8df67524193647b4367c9d2a0215dfcd, server=3456ee6a3164,46129,1731736195700 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T05:50:39,207 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., hostname=3456ee6a3164,46129,1731736195700, seqNum=131 because the exception is null or not the one we care about 2024-11-16T05:50:39,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:39,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:39,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/a029ecceb00a418e833b47896873fd29 2024-11-16T05:50:39,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/a029ecceb00a418e833b47896873fd29 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a029ecceb00a418e833b47896873fd29 2024-11-16T05:50:39,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a029ecceb00a418e833b47896873fd29, entries=7, sequenceid=262, filesize=12.2 K 2024-11-16T05:50:39,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 8df67524193647b4367c9d2a0215dfcd in 428ms, sequenceid=262, compaction requested=false 2024-11-16T05:50:39,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:40,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:40,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:41,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:41,472 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-16T05:50:41,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:42,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:42,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:43,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:43,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:44,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:44,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:45,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:45,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:46,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:46,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:47,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:47,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:48,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:48,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:49,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:49,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-16T05:50:49,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/018502a126644b1ead51949ea1d7422b is 1080, key is row0197/info:/1731736239172/Put/seqid=0 2024-11-16T05:50:49,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741871_1047 (size=29807) 2024-11-16T05:50:49,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741871_1047 (size=29807) 2024-11-16T05:50:49,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/018502a126644b1ead51949ea1d7422b 2024-11-16T05:50:49,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/018502a126644b1ead51949ea1d7422b as 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/018502a126644b1ead51949ea1d7422b 2024-11-16T05:50:49,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/018502a126644b1ead51949ea1d7422b, entries=23, sequenceid=288, filesize=29.1 K 2024-11-16T05:50:49,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for 8df67524193647b4367c9d2a0215dfcd in 39ms, sequenceid=288, compaction requested=true 2024-11-16T05:50:49,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:49,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:49,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:49,299 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:49,301 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 186327 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:49,301 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:49,301 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 
2024-11-16T05:50:49,301 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c69384f6f464474853e15b717794a2f, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a029ecceb00a418e833b47896873fd29, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/018502a126644b1ead51949ea1d7422b] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=182.0 K 2024-11-16T05:50:49,301 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c69384f6f464474853e15b717794a2f, keycount=128, bloomtype=ROW, size=140.6 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731736208798 2024-11-16T05:50:49,302 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting a029ecceb00a418e833b47896873fd29, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1731736237151 2024-11-16T05:50:49,302 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] compactions.Compactor(225): Compacting 018502a126644b1ead51949ea1d7422b, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731736239172 2024-11-16T05:50:49,317 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#86 average throughput is 40.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:49,318 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/b329995b43c74d9d8d7fc55d2e34d1a3 is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741872_1048 (size=176477) 2024-11-16T05:50:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741872_1048 (size=176477) 2024-11-16T05:50:49,327 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/b329995b43c74d9d8d7fc55d2e34d1a3 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/b329995b43c74d9d8d7fc55d2e34d1a3 2024-11-16T05:50:49,334 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into b329995b43c74d9d8d7fc55d2e34d1a3(size=172.3 K), total size for store is 172.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T05:50:49,334 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:49,334 INFO [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736249299; duration=0sec 2024-11-16T05:50:49,334 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:49,334 DEBUG [RS:0;3456ee6a3164:46129-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:49,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:49,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:50,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:50,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:51,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:51,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T05:50:51,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/6acf658db9a044a490b85934c2a9237b is 1080, key is row0220/info:/1731736249262/Put/seqid=0 2024-11-16T05:50:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741873_1049 (size=12523) 2024-11-16T05:50:51,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741873_1049 (size=12523) 2024-11-16T05:50:51,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/6acf658db9a044a490b85934c2a9237b 2024-11-16T05:50:51,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/6acf658db9a044a490b85934c2a9237b as 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6acf658db9a044a490b85934c2a9237b 2024-11-16T05:50:51,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6acf658db9a044a490b85934c2a9237b, entries=7, sequenceid=299, filesize=12.2 K 2024-11-16T05:50:51,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 8df67524193647b4367c9d2a0215dfcd in 22ms, sequenceid=299, compaction requested=false 2024-11-16T05:50:51,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:51,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:51,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T05:50:51,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/c7a38c1d08434006841b416d705ebd7d is 1080, key is row0227/info:/1731736251285/Put/seqid=0 2024-11-16T05:50:51,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741874_1050 (size=17918) 2024-11-16T05:50:51,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741874_1050 (size=17918) 2024-11-16T05:50:51,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/c7a38c1d08434006841b416d705ebd7d 2024-11-16T05:50:51,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/c7a38c1d08434006841b416d705ebd7d as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c7a38c1d08434006841b416d705ebd7d 2024-11-16T05:50:51,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c7a38c1d08434006841b416d705ebd7d, entries=12, sequenceid=314, filesize=17.5 K 2024-11-16T05:50:51,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 8df67524193647b4367c9d2a0215dfcd in 24ms, sequenceid=314, compaction requested=true 2024-11-16T05:50:51,330 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:51,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46129 {}] regionserver.HRegion(8855): Flush requested on 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:51,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8df67524193647b4367c9d2a0215dfcd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T05:50:51,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:51,330 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T05:50:51,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T05:50:51,331 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 206918 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T05:50:51,331 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1541): 8df67524193647b4367c9d2a0215dfcd/info is initiating minor compaction (all files) 2024-11-16T05:50:51,332 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8df67524193647b4367c9d2a0215dfcd/info in TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:51,332 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/b329995b43c74d9d8d7fc55d2e34d1a3, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6acf658db9a044a490b85934c2a9237b, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c7a38c1d08434006841b416d705ebd7d] into tmpdir=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp, totalSize=202.1 K 2024-11-16T05:50:51,332 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting b329995b43c74d9d8d7fc55d2e34d1a3, keycount=158, bloomtype=ROW, size=172.3 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731736208798 2024-11-16T05:50:51,332 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting 6acf658db9a044a490b85934c2a9237b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731736249262 2024-11-16T05:50:51,333 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] compactions.Compactor(225): Compacting c7a38c1d08434006841b416d705ebd7d, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731736251285 2024-11-16T05:50:51,333 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/9a8558cb6dba42c98c7d3656eef34eb8 is 1080, key is row0239/info:/1731736251307/Put/seqid=0 2024-11-16T05:50:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741875_1051 (size=19013) 2024-11-16T05:50:51,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741875_1051 (size=19013) 2024-11-16T05:50:51,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/9a8558cb6dba42c98c7d3656eef34eb8 2024-11-16T05:50:51,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:51,348 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8df67524193647b4367c9d2a0215dfcd#info#compaction#90 average throughput is 60.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T05:50:51,349 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/daf4481b22b14f8aae8f85bd6a9a200c is 1080, key is row0062/info:/1731736208798/Put/seqid=0 2024-11-16T05:50:51,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741876_1052 (size=197084) 2024-11-16T05:50:51,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741876_1052 (size=197084) 2024-11-16T05:50:51,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/9a8558cb6dba42c98c7d3656eef34eb8 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/9a8558cb6dba42c98c7d3656eef34eb8 2024-11-16T05:50:51,356 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/daf4481b22b14f8aae8f85bd6a9a200c as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/daf4481b22b14f8aae8f85bd6a9a200c 2024-11-16T05:50:51,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/9a8558cb6dba42c98c7d3656eef34eb8, entries=13, sequenceid=330, filesize=18.6 K 2024-11-16T05:50:51,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=5.25 KB/5380 for 8df67524193647b4367c9d2a0215dfcd in 28ms, sequenceid=330, compaction requested=false 2024-11-16T05:50:51,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:51,362 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8df67524193647b4367c9d2a0215dfcd/info of 8df67524193647b4367c9d2a0215dfcd into daf4481b22b14f8aae8f85bd6a9a200c(size=192.5 K), total size for store is 211.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
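The repeated "Failed invocation for hdfs://localhost:36821/..." warnings above come from RecoverLeaseFSUtils probing whether an old WAL file has already been closed: the probe is made through reflection (hence the InvocationTargetException wrapper in every trace) and keeps failing once the underlying DFSClient has been shut down, which is why each attempt bottoms out in "java.io.IOException: Filesystem closed". The following is a simplified, hypothetical illustration of that reflective probe, not the actual HBase implementation; only DistributedFileSystem.isFileClosed(Path) is confirmed by the traces, every other name here is an assumption.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the reflective "is the file closed yet?" check seen in the
// WARN stack traces above. The method is looked up reflectively so the code can run
// against filesystem implementations that may not expose isFileClosed at all.
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  // Returns true if the filesystem reports the file as closed, false if the probe
  // could not be made. A closed DFSClient surfaces here as an InvocationTargetException
  // caused by "java.io.IOException: Filesystem closed", exactly as in the log above.
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem has no isFileClosed method
    } catch (IllegalAccessException | InvocationTargetException e) {
      // e.getCause() would be the "Filesystem closed" IOException from the traces above.
      return false;
    }
  }
}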
2024-11-16T05:50:51,362 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:51,362 INFO [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd., storeName=8df67524193647b4367c9d2a0215dfcd/info, priority=13, startTime=1731736251330; duration=0sec 2024-11-16T05:50:51,362 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T05:50:51,362 DEBUG [RS:0;3456ee6a3164:46129-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8df67524193647b4367c9d2a0215dfcd:info 2024-11-16T05:50:51,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:52,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:52,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:53,343 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-16T05:50:53,344 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46129%2C1731736195700.1731736253344 2024-11-16T05:50:53,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:53,356 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,356 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,356 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736196085 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736253344 2024-11-16T05:50:53,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34923:34923),(127.0.0.1/127.0.0.1:45045:45045)] 2024-11-16T05:50:53,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736196085 is not closed yet, will try archiving it next time 2024-11-16T05:50:53,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741833_1009 (size=316763) 2024-11-16T05:50:53,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741833_1009 (size=316763) 2024-11-16T05:50:53,362 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-16T05:50:53,366 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/info/faf38dab0db447429f666cfbb4515648 is 186, key is TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d./info:regioninfo/1731736211645/Put/seqid=0 2024-11-16T05:50:53,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741878_1054 (size=6153) 2024-11-16T05:50:53,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741878_1054 (size=6153) 2024-11-16T05:50:53,370 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/info/faf38dab0db447429f666cfbb4515648 2024-11-16T05:50:53,376 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/.tmp/info/faf38dab0db447429f666cfbb4515648 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/info/faf38dab0db447429f666cfbb4515648 2024-11-16T05:50:53,381 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/info/faf38dab0db447429f666cfbb4515648, entries=5, sequenceid=21, filesize=6.0 K 2024-11-16T05:50:53,382 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-16T05:50:53,382 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T05:50:53,382 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6460758d797e29642734b0c09718591d: 2024-11-16T05:50:53,382 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8df67524193647b4367c9d2a0215dfcd 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-16T05:50:53,386 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/748fd2ee8f0e4af587984bd2e6a20b43 is 1080, key is row0252/info:/1731736251331/Put/seqid=0 2024-11-16T05:50:53,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741879_1055 (size=10357) 2024-11-16T05:50:53,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741879_1055 (size=10357) 2024-11-16T05:50:53,391 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/748fd2ee8f0e4af587984bd2e6a20b43 2024-11-16T05:50:53,396 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/.tmp/info/748fd2ee8f0e4af587984bd2e6a20b43 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/748fd2ee8f0e4af587984bd2e6a20b43 2024-11-16T05:50:53,400 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/748fd2ee8f0e4af587984bd2e6a20b43, entries=5, sequenceid=339, filesize=10.1 K 2024-11-16T05:50:53,401 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 8df67524193647b4367c9d2a0215dfcd in 19ms, sequenceid=339, compaction requested=true 2024-11-16T05:50:53,401 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 8df67524193647b4367c9d2a0215dfcd: 2024-11-16T05:50:53,401 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C46129%2C1731736195700.1731736253401 2024-11-16T05:50:53,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,405 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,405 INFO [sync.2 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,406 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,406 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736253344 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736253401 2024-11-16T05:50:53,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741877_1053 (size=731) 2024-11-16T05:50:53,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741877_1053 (size=731) 2024-11-16T05:50:53,409 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736196085 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/oldWALs/3456ee6a3164%2C46129%2C1731736195700.1731736196085 2024-11-16T05:50:53,410 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/WALs/3456ee6a3164,46129,1731736195700/3456ee6a3164%2C46129%2C1731736195700.1731736253344 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/oldWALs/3456ee6a3164%2C46129%2C1731736195700.1731736253344 2024-11-16T05:50:53,411 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45045:45045),(127.0.0.1/127.0.0.1:34923:34923)] 2024-11-16T05:50:53,411 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T05:50:53,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:50:53,412 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
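The "Rolled WAL ... with entries=2, filesize=723 B; new WAL ..." lines record the region server closing its current write-ahead log file, opening a fresh one, and archiving the old file to oldWALs once nothing references it. A roll like this can also be requested from a client through the Admin API; the sketch below is illustrative only (the connection setup and the loop over all region servers are assumptions, not taken from the test, which rolls the WAL of a single server).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask each region server to close its current WAL and start a new one,
      // the same operation the "Rolled WAL" entries above report for one server.
      for (ServerName server : admin.getRegionServers()) {
        admin.rollWALWriter(server);
      }
    }
  }
}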
2024-11-16T05:50:53,412 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:50:53,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:53,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:53,412 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
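The call stack above shows where the shutdown originates: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection and then stops the in-process cluster. A tearDown of roughly this shape, sketched here under the assumption of a JUnit 4 test that starts the mini cluster in setUp (the real test wires this up differently), looks like:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts an in-process master, region server, ZooKeeper and HDFS mini cluster.
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection and stops all in-process daemons;
    // this is the HBaseTestingUtil.shutdownMiniCluster() frame in the stack above.
    TEST_UTIL.shutdownMiniCluster();
  }
}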
2024-11-16T05:50:53,412 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:50:53,412 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=767660863, stopped=false 2024-11-16T05:50:53,412 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,40857,1731736195655 2024-11-16T05:50:53,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:50:53,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:50:53,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:53,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:53,414 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:50:53,414 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:50:53,414 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:50:53,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:53,414 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:50:53,414 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,46129,1731736195700' ***** 2024-11-16T05:50:53,414 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:50:53,415 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
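The NodeDeleted events for /hbase/running above show how the shutdown is broadcast: the master deletes the znode, every ZKWatcher observes the deletion, and each process then re-sets a watch on the now-missing node ("Set watcher on znode that does not yet exist, /hbase/running"). A minimal stand-alone watcher for the same znode could look like the sketch below; it uses the plain ZooKeeper client rather than HBase's ZKWatcher, and the quorum string and session timeout are placeholders (the test above uses 127.0.0.1:50868).

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatcher {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum and session timeout.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> { });

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted
            && "/hbase/running".equals(event.getPath())) {
          System.out.println("Cluster shutdown requested: /hbase/running was deleted");
        }
        try {
          // ZooKeeper watches are one-shot, so re-register after every event,
          // mirroring the "Set watcher on znode that does not yet exist" lines above.
          zk.exists("/hbase/running", this);
        } catch (Exception e) {
          // ignored in this sketch
        }
      }
    };

    zk.exists("/hbase/running", watcher);
    Thread.sleep(Long.MAX_VALUE);
  }
}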
2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(3091): Received CLOSE for 6460758d797e29642734b0c09718591d 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(3091): Received CLOSE for 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,46129,1731736195700 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:50:53,415 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6460758d797e29642734b0c09718591d, disabling compactions & flushes 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:46129. 2024-11-16T05:50:53,415 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:53,415 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:53,415 DEBUG [RS:0;3456ee6a3164:46129 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:50:53,415 DEBUG [RS:0;3456ee6a3164:46129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:53,415 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. after waiting 0 ms 2024-11-16T05:50:53,415 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:53,415 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-16T05:50:53,416 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:50:53,416 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T05:50:53,416 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:50:53,416 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:50:53,417 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-16T05:50:53,417 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 6460758d797e29642734b0c09718591d=TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d., 8df67524193647b4367c9d2a0215dfcd=TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.} 2024-11-16T05:50:53,417 DEBUG [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6460758d797e29642734b0c09718591d, 8df67524193647b4367c9d2a0215dfcd 2024-11-16T05:50:53,417 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:50:53,417 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:50:53,417 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:50:53,417 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:50:53,417 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:50:53,417 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46->hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4-bottom] to archive 2024-11-16T05:50:53,418 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T05:50:53,420 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:53,420 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3456ee6a3164:40857 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-16T05:50:53,420 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-16T05:50:53,421 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-16T05:50:53,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:50:53,422 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:50:53,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736253417Running coprocessor pre-close hooks at 1731736253417Disabling compacts and flushes for region at 1731736253417Disabling writes for close at 1731736253417Writing region close event to WAL at 1731736253418 (+1 ms)Running coprocessor post-close hooks at 1731736253422 (+4 ms)Closed at 1731736253422 2024-11-16T05:50:53,422 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:50:53,423 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/6460758d797e29642734b0c09718591d/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-16T05:50:53,424 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 
2024-11-16T05:50:53,424 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6460758d797e29642734b0c09718591d: Waiting for close lock at 1731736253415Running coprocessor pre-close hooks at 1731736253415Disabling compacts and flushes for region at 1731736253415Disabling writes for close at 1731736253415Writing region close event to WAL at 1731736253420 (+5 ms)Running coprocessor post-close hooks at 1731736253424 (+4 ms)Closed at 1731736253424 2024-11-16T05:50:53,424 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731736210898.6460758d797e29642734b0c09718591d. 2024-11-16T05:50:53,424 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8df67524193647b4367c9d2a0215dfcd, disabling compactions & flushes 2024-11-16T05:50:53,424 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:53,424 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:53,424 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. after waiting 0 ms 2024-11-16T05:50:53,424 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 
2024-11-16T05:50:53,425 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46->hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/ee760dfd2eb02f6e118668b4ae1eee46/info/bffa10a0290f42da80072afe62603ff4-top, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6e480ed4bd9444f886a3645c8defe2ad, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3b5d5a58f3054f4e84db09c414a97ed5, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aaa2f7ee808440c9872b2f7557597c28, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bdf0e6c1766445d0935e116cd59b2087, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/49c162b5e6c347b4a9fd5fddaee9dc22, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c8bc635c1cb49d584797a4a0fb32ea1, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/d3aafbecbac642b0b7a9b82efbb50ebc, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa29879a2723459fb5651e7b77650522, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c547a8bff3184415a86bb90ba0d03aca, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a76c2034a75d4b609dcb7604f9198fe4, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/ef3a0b1089a8414fa0a217ccc40a2c4a, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c69384f6f464474853e15b717794a2f, 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa4d71c895a949b48768220a43fcc049, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a029ecceb00a418e833b47896873fd29, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/b329995b43c74d9d8d7fc55d2e34d1a3, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/018502a126644b1ead51949ea1d7422b, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6acf658db9a044a490b85934c2a9237b, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c7a38c1d08434006841b416d705ebd7d] to archive 2024-11-16T05:50:53,426 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T05:50:53,427 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bffa10a0290f42da80072afe62603ff4.ee760dfd2eb02f6e118668b4ae1eee46 2024-11-16T05:50:53,428 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-a4cb54d12dc54c8e90be29a4406c1223 2024-11-16T05:50:53,430 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6e480ed4bd9444f886a3645c8defe2ad to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6e480ed4bd9444f886a3645c8defe2ad 2024-11-16T05:50:53,431 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/TestLogRolling-testLogRolling=ee760dfd2eb02f6e118668b4ae1eee46-c007e7ce50644532adffb8f46e06efd8 2024-11-16T05:50:53,432 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3b5d5a58f3054f4e84db09c414a97ed5 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3b5d5a58f3054f4e84db09c414a97ed5 2024-11-16T05:50:53,433 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aaa2f7ee808440c9872b2f7557597c28 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aaa2f7ee808440c9872b2f7557597c28 2024-11-16T05:50:53,434 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bdf0e6c1766445d0935e116cd59b2087 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/bdf0e6c1766445d0935e116cd59b2087 2024-11-16T05:50:53,435 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/49c162b5e6c347b4a9fd5fddaee9dc22 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/49c162b5e6c347b4a9fd5fddaee9dc22 2024-11-16T05:50:53,436 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c8bc635c1cb49d584797a4a0fb32ea1 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c8bc635c1cb49d584797a4a0fb32ea1 
2024-11-16T05:50:53,436 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/d3aafbecbac642b0b7a9b82efbb50ebc to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/d3aafbecbac642b0b7a9b82efbb50ebc 2024-11-16T05:50:53,437 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa29879a2723459fb5651e7b77650522 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa29879a2723459fb5651e7b77650522 2024-11-16T05:50:53,438 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c547a8bff3184415a86bb90ba0d03aca to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c547a8bff3184415a86bb90ba0d03aca 2024-11-16T05:50:53,439 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a76c2034a75d4b609dcb7604f9198fe4 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a76c2034a75d4b609dcb7604f9198fe4 2024-11-16T05:50:53,440 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/ef3a0b1089a8414fa0a217ccc40a2c4a to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/ef3a0b1089a8414fa0a217ccc40a2c4a 2024-11-16T05:50:53,440 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c69384f6f464474853e15b717794a2f to 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/3c69384f6f464474853e15b717794a2f 2024-11-16T05:50:53,441 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa4d71c895a949b48768220a43fcc049 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/aa4d71c895a949b48768220a43fcc049 2024-11-16T05:50:53,442 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a029ecceb00a418e833b47896873fd29 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/a029ecceb00a418e833b47896873fd29 2024-11-16T05:50:53,443 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/b329995b43c74d9d8d7fc55d2e34d1a3 to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/b329995b43c74d9d8d7fc55d2e34d1a3 2024-11-16T05:50:53,444 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/018502a126644b1ead51949ea1d7422b to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/018502a126644b1ead51949ea1d7422b 2024-11-16T05:50:53,445 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6acf658db9a044a490b85934c2a9237b to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/6acf658db9a044a490b85934c2a9237b 2024-11-16T05:50:53,446 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c7a38c1d08434006841b416d705ebd7d to hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/archive/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/info/c7a38c1d08434006841b416d705ebd7d 2024-11-16T05:50:53,446 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6e480ed4bd9444f886a3645c8defe2ad=42984, 3b5d5a58f3054f4e84db09c414a97ed5=12516, aaa2f7ee808440c9872b2f7557597c28=64714, bdf0e6c1766445d0935e116cd59b2087=19000, 49c162b5e6c347b4a9fd5fddaee9dc22=19000, 3c8bc635c1cb49d584797a4a0fb32ea1=97233, d3aafbecbac642b0b7a9b82efbb50ebc=23316, aa29879a2723459fb5651e7b77650522=17906, c547a8bff3184415a86bb90ba0d03aca=117821, a76c2034a75d4b609dcb7604f9198fe4=12516, ef3a0b1089a8414fa0a217ccc40a2c4a=17906, 3c69384f6f464474853e15b717794a2f=144000, aa4d71c895a949b48768220a43fcc049=17906, a029ecceb00a418e833b47896873fd29=12520, b329995b43c74d9d8d7fc55d2e34d1a3=176477, 018502a126644b1ead51949ea1d7422b=29807, 6acf658db9a044a490b85934c2a9237b=12523, c7a38c1d08434006841b416d705ebd7d=17918] 2024-11-16T05:50:53,449 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/data/default/TestLogRolling-testLogRolling/8df67524193647b4367c9d2a0215dfcd/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-11-16T05:50:53,449 INFO [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:53,450 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8df67524193647b4367c9d2a0215dfcd: Waiting for close lock at 1731736253424Running coprocessor pre-close hooks at 1731736253424Disabling compacts and flushes for region at 1731736253424Disabling writes for close at 1731736253424Writing region close event to WAL at 1731736253446 (+22 ms)Running coprocessor post-close hooks at 1731736253449 (+3 ms)Closed at 1731736253449 2024-11-16T05:50:53,450 DEBUG [RS_CLOSE_REGION-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731736210898.8df67524193647b4367c9d2a0215dfcd. 2024-11-16T05:50:53,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:53,617 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,46129,1731736195700; all regions closed. 
2024-11-16T05:50:53,618 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,618 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,618 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,618 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,618 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741834_1010 (size=8107) 2024-11-16T05:50:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741834_1010 (size=8107) 2024-11-16T05:50:53,622 DEBUG [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/oldWALs 2024-11-16T05:50:53,622 INFO [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C46129%2C1731736195700.meta:.meta(num 1731736196461) 2024-11-16T05:50:53,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,623 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,623 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741880_1056 (size=780) 2024-11-16T05:50:53,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741880_1056 (size=780) 2024-11-16T05:50:53,626 DEBUG [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/oldWALs 2024-11-16T05:50:53,626 INFO [RS:0;3456ee6a3164:46129 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C46129%2C1731736195700:(num 1731736253401) 2024-11-16T05:50:53,626 DEBUG [RS:0;3456ee6a3164:46129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:53,626 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:50:53,627 INFO [RS:0;3456ee6a3164:46129 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:50:53,627 INFO [RS:0;3456ee6a3164:46129 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T05:50:53,627 INFO [RS:0;3456ee6a3164:46129 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:50:53,627 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T05:50:53,627 INFO [RS:0;3456ee6a3164:46129 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46129 2024-11-16T05:50:53,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:50:53,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,46129,1731736195700 2024-11-16T05:50:53,628 INFO [RS:0;3456ee6a3164:46129 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:50:53,629 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,46129,1731736195700] 2024-11-16T05:50:53,629 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,46129,1731736195700 already deleted, retry=false 2024-11-16T05:50:53,629 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,46129,1731736195700 expired; onlineServers=0 2024-11-16T05:50:53,630 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,40857,1731736195655' ***** 2024-11-16T05:50:53,630 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:50:53,630 INFO [M:0;3456ee6a3164:40857 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:50:53,630 INFO [M:0;3456ee6a3164:40857 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:50:53,630 DEBUG [M:0;3456ee6a3164:40857 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:50:53,630 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T05:50:53,630 DEBUG [M:0;3456ee6a3164:40857 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:50:53,630 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736195849 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736195849,5,FailOnTimeoutGroup] 2024-11-16T05:50:53,630 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736195849 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736195849,5,FailOnTimeoutGroup] 2024-11-16T05:50:53,630 INFO [M:0;3456ee6a3164:40857 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:50:53,630 INFO [M:0;3456ee6a3164:40857 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:50:53,630 DEBUG [M:0;3456ee6a3164:40857 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:50:53,630 INFO [M:0;3456ee6a3164:40857 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:50:53,630 INFO [M:0;3456ee6a3164:40857 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:50:53,631 INFO [M:0;3456ee6a3164:40857 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:50:53,631 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:50:53,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:50:53,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:53,631 DEBUG [M:0;3456ee6a3164:40857 {}] zookeeper.ZKUtil(347): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T05:50:53,631 WARN [M:0;3456ee6a3164:40857 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T05:50:53,631 INFO [M:0;3456ee6a3164:40857 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/.lastflushedseqids 2024-11-16T05:50:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741881_1057 (size=228) 2024-11-16T05:50:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741881_1057 (size=228) 2024-11-16T05:50:53,637 INFO [M:0;3456ee6a3164:40857 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:50:53,637 INFO [M:0;3456ee6a3164:40857 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:50:53,637 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:50:53,637 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:53,637 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:53,637 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:50:53,637 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:53,637 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-16T05:50:53,653 DEBUG [M:0;3456ee6a3164:40857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ce7c5689643149d29614b6053e9e8363 is 82, key is hbase:meta,,1/info:regioninfo/1731736196493/Put/seqid=0 2024-11-16T05:50:53,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741882_1058 (size=5672) 2024-11-16T05:50:53,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741882_1058 (size=5672) 2024-11-16T05:50:53,657 INFO [M:0;3456ee6a3164:40857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ce7c5689643149d29614b6053e9e8363 2024-11-16T05:50:53,678 DEBUG [M:0;3456ee6a3164:40857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a403145385744b25a0dfb7a3c4608556 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731736197025/Put/seqid=0 2024-11-16T05:50:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741883_1059 (size=7089) 2024-11-16T05:50:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741883_1059 (size=7089) 2024-11-16T05:50:53,702 INFO [M:0;3456ee6a3164:40857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a403145385744b25a0dfb7a3c4608556 2024-11-16T05:50:53,708 INFO [M:0;3456ee6a3164:40857 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a403145385744b25a0dfb7a3c4608556 2024-11-16T05:50:53,721 DEBUG [M:0;3456ee6a3164:40857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dbf05c4da26a43c8967973460825c180 is 69, key is 3456ee6a3164,46129,1731736195700/rs:state/1731736195942/Put/seqid=0 2024-11-16T05:50:53,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741884_1060 (size=5156) 2024-11-16T05:50:53,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741884_1060 (size=5156) 2024-11-16T05:50:53,726 INFO [M:0;3456ee6a3164:40857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dbf05c4da26a43c8967973460825c180 2024-11-16T05:50:53,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:53,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46129-0x1004714aea50001, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:53,729 INFO [RS:0;3456ee6a3164:46129 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:50:53,729 INFO [RS:0;3456ee6a3164:46129 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,46129,1731736195700; zookeeper connection closed. 2024-11-16T05:50:53,729 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e5a12a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e5a12a8 2024-11-16T05:50:53,730 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T05:50:53,752 DEBUG [M:0;3456ee6a3164:40857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4b268532d70845bcb9c438d4c2114a1b is 52, key is load_balancer_on/state:d/1731736196630/Put/seqid=0 2024-11-16T05:50:53,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741885_1061 (size=5056) 2024-11-16T05:50:53,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741885_1061 (size=5056) 2024-11-16T05:50:53,758 INFO [M:0;3456ee6a3164:40857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4b268532d70845bcb9c438d4c2114a1b 2024-11-16T05:50:53,764 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ce7c5689643149d29614b6053e9e8363 as 
hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ce7c5689643149d29614b6053e9e8363 2024-11-16T05:50:53,770 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ce7c5689643149d29614b6053e9e8363, entries=8, sequenceid=125, filesize=5.5 K 2024-11-16T05:50:53,772 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a403145385744b25a0dfb7a3c4608556 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a403145385744b25a0dfb7a3c4608556 2024-11-16T05:50:53,776 INFO [M:0;3456ee6a3164:40857 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a403145385744b25a0dfb7a3c4608556 2024-11-16T05:50:53,777 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a403145385744b25a0dfb7a3c4608556, entries=13, sequenceid=125, filesize=6.9 K 2024-11-16T05:50:53,778 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dbf05c4da26a43c8967973460825c180 as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dbf05c4da26a43c8967973460825c180 2024-11-16T05:50:53,783 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dbf05c4da26a43c8967973460825c180, entries=1, sequenceid=125, filesize=5.0 K 2024-11-16T05:50:53,784 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4b268532d70845bcb9c438d4c2114a1b as hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4b268532d70845bcb9c438d4c2114a1b 2024-11-16T05:50:53,789 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39651/user/jenkins/test-data/3021bd94-b59c-f0db-735d-590d98689869/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4b268532d70845bcb9c438d4c2114a1b, entries=1, sequenceid=125, filesize=4.9 K 2024-11-16T05:50:53,790 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=125, compaction requested=false 2024-11-16T05:50:53,791 INFO [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T05:50:53,791 DEBUG [M:0;3456ee6a3164:40857 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736253637Disabling compacts and flushes for region at 1731736253637Disabling writes for close at 1731736253637Obtaining lock to block concurrent updates at 1731736253637Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736253637Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731736253637Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731736253638 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736253638Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736253652 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736253652Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736253662 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736253678 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736253678Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736253708 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736253721 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736253721Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736253730 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736253752 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736253752Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e8957a5: reopening flushed file at 1731736253763 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79b87c4b: reopening flushed file at 1731736253771 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ef93c93: reopening flushed file at 1731736253777 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c6f6503: reopening flushed file at 1731736253783 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=125, compaction requested=false at 1731736253790 (+7 ms)Writing region close event to WAL at 1731736253791 (+1 ms)Closed at 1731736253791 2024-11-16T05:50:53,792 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,792 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,793 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,793 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,793 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:53,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741830_1006 (size=61320) 2024-11-16T05:50:53,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741830_1006 (size=61320) 2024-11-16T05:50:53,795 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T05:50:53,795 INFO [M:0;3456ee6a3164:40857 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T05:50:53,795 INFO [M:0;3456ee6a3164:40857 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40857 2024-11-16T05:50:53,796 INFO [M:0;3456ee6a3164:40857 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:50:53,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:53,898 INFO [M:0;3456ee6a3164:40857 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:50:53,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40857-0x1004714aea50000, quorum=127.0.0.1:50868, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:53,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@506744b5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:50:53,901 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c0fa0ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:50:53,901 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:50:53,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a78718c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:50:53,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c07cbd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir/,STOPPED} 2024-11-16T05:50:53,904 WARN [BP-314551495-172.17.0.2-1731736195079 heartbeating to localhost/127.0.0.1:39651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:50:53,904 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:50:53,904 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:50:53,904 WARN [BP-314551495-172.17.0.2-1731736195079 heartbeating to localhost/127.0.0.1:39651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-314551495-172.17.0.2-1731736195079 (Datanode Uuid ea03aaf1-ba5c-48cc-ac5a-cbf7f546781f) service to localhost/127.0.0.1:39651 2024-11-16T05:50:53,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data3/current/BP-314551495-172.17.0.2-1731736195079 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:53,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data4/current/BP-314551495-172.17.0.2-1731736195079 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:53,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:50:53,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@624c2d5a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:50:53,914 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d299317{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:50:53,914 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:50:53,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6475f7bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:50:53,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f3d0298{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir/,STOPPED} 2024-11-16T05:50:53,917 WARN [BP-314551495-172.17.0.2-1731736195079 heartbeating to localhost/127.0.0.1:39651 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:50:53,917 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:50:53,917 WARN [BP-314551495-172.17.0.2-1731736195079 heartbeating to localhost/127.0.0.1:39651 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-314551495-172.17.0.2-1731736195079 (Datanode Uuid 53f1d793-59d3-41db-a262-24c1bd191ae5) service to localhost/127.0.0.1:39651 2024-11-16T05:50:53,917 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:50:53,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data1/current/BP-314551495-172.17.0.2-1731736195079 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:53,918 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/cluster_f441b136-a34d-58d2-7eaa-cb3df2d592f5/data/data2/current/BP-314551495-172.17.0.2-1731736195079 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:53,918 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:50:53,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7596208e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:50:53,926 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44ecb50d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:50:53,926 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:50:53,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1488736e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:50:53,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7faafa6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir/,STOPPED} 2024-11-16T05:50:53,935 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:50:53,956 INFO [regionserver/3456ee6a3164:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:50:53,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:50:53,978 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 208) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39651 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:39651 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39651 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39651 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39651 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:39651 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39651 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:39651 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=166 (was 127) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2880 (was 3019) 2024-11-16T05:50:53,987 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=166, ProcessCount=11, AvailableMemoryMB=2880 2024-11-16T05:50:53,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T05:50:53,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.log.dir so I do NOT create it in target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930 2024-11-16T05:50:53,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/18e9eeef-221e-9da5-d088-03cd7c6a574e/hadoop.tmp.dir so I do NOT create it in target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a, deleteOnExit=true 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/test.cache.data in system properties and HBase conf 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir in system properties and HBase conf 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T05:50:53,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T05:50:53,988 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/nfs.dump.dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/java.io.tmpdir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T05:50:53,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T05:50:54,002 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:50:54,044 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:50:54,047 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:50:54,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:50:54,048 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:50:54,048 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:50:54,049 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:50:54,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52e40e10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:50:54,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58c8824c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:50:54,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a619478{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/java.io.tmpdir/jetty-localhost-33575-hadoop-hdfs-3_4_1-tests_jar-_-any-16028111529040704281/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:50:54,155 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f1f87c1{HTTP/1.1, (http/1.1)}{localhost:33575} 2024-11-16T05:50:54,155 INFO [Time-limited test {}] server.Server(415): Started @295853ms 2024-11-16T05:50:54,169 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T05:50:54,224 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:50:54,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:50:54,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:50:54,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:50:54,227 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T05:50:54,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cad7b36{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:50:54,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f6a71ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:50:54,304 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:50:54,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T05:50:54,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T05:50:54,305 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T05:50:54,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54fc4cc1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/java.io.tmpdir/jetty-localhost-38469-hadoop-hdfs-3_4_1-tests_jar-_-any-2285154915130815471/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:50:54,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25e8389f{HTTP/1.1, (http/1.1)}{localhost:38469} 2024-11-16T05:50:54,321 INFO [Time-limited test {}] server.Server(415): Started @296020ms 2024-11-16T05:50:54,322 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T05:50:54,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:54,358 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T05:50:54,361 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T05:50:54,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T05:50:54,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T05:50:54,362 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T05:50:54,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39601996{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir/,AVAILABLE} 2024-11-16T05:50:54,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@660a9944{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T05:50:54,388 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data1/current/BP-2095766484-172.17.0.2-1731736254006/current, will proceed with Du for space computation calculation, 2024-11-16T05:50:54,388 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data2/current/BP-2095766484-172.17.0.2-1731736254006/current, will proceed with Du for space computation calculation, 2024-11-16T05:50:54,406 WARN [Thread-2471 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T05:50:54,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd0dcb7e96abd3440 with lease ID 0xb24a9579fd14baf2: Processing first storage report for DS-98847ec0-386a-4008-86cd-7b94cdac71ed from datanode DatanodeRegistration(127.0.0.1:37189, datanodeUuid=32366372-f206-4ecf-bd50-622429ec3bb5, infoPort=33505, infoSecurePort=0, ipcPort=45507, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006) 2024-11-16T05:50:54,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0dcb7e96abd3440 with lease ID 0xb24a9579fd14baf2: from storage DS-98847ec0-386a-4008-86cd-7b94cdac71ed node DatanodeRegistration(127.0.0.1:37189, datanodeUuid=32366372-f206-4ecf-bd50-622429ec3bb5, infoPort=33505, infoSecurePort=0, ipcPort=45507, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:50:54,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd0dcb7e96abd3440 with lease ID 0xb24a9579fd14baf2: Processing first storage report for DS-93e314f0-51f0-4552-99ed-d09809456f43 from datanode DatanodeRegistration(127.0.0.1:37189, datanodeUuid=32366372-f206-4ecf-bd50-622429ec3bb5, infoPort=33505, infoSecurePort=0, ipcPort=45507, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006) 2024-11-16T05:50:54,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0dcb7e96abd3440 with lease ID 0xb24a9579fd14baf2: from storage DS-93e314f0-51f0-4552-99ed-d09809456f43 node DatanodeRegistration(127.0.0.1:37189, datanodeUuid=32366372-f206-4ecf-bd50-622429ec3bb5, infoPort=33505, infoSecurePort=0, ipcPort=45507, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:50:54,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37ed99c5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/java.io.tmpdir/jetty-localhost-34297-hadoop-hdfs-3_4_1-tests_jar-_-any-6217628000829598031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:50:54,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68dc7da0{HTTP/1.1, (http/1.1)}{localhost:34297} 2024-11-16T05:50:54,470 INFO [Time-limited test {}] server.Server(415): Started @296169ms 2024-11-16T05:50:54,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-16T05:50:54,531 WARN [Thread-2518 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data3/current/BP-2095766484-172.17.0.2-1731736254006/current, will proceed with Du for space computation calculation, 2024-11-16T05:50:54,531 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data4/current/BP-2095766484-172.17.0.2-1731736254006/current, will proceed with Du for space computation calculation, 2024-11-16T05:50:54,547 WARN [Thread-2507 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T05:50:54,549 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x197b15324f78d411 with lease ID 0xb24a9579fd14baf3: Processing first storage report for DS-264ab51d-a17d-4508-9f36-a70ed7bcb0c0 from datanode DatanodeRegistration(127.0.0.1:33197, datanodeUuid=d854862b-b95c-4a0d-bb20-9200cd77370d, infoPort=43079, infoSecurePort=0, ipcPort=33895, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006) 2024-11-16T05:50:54,549 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x197b15324f78d411 with lease ID 0xb24a9579fd14baf3: from storage DS-264ab51d-a17d-4508-9f36-a70ed7bcb0c0 node DatanodeRegistration(127.0.0.1:33197, datanodeUuid=d854862b-b95c-4a0d-bb20-9200cd77370d, infoPort=43079, infoSecurePort=0, ipcPort=33895, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:50:54,549 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x197b15324f78d411 with lease ID 0xb24a9579fd14baf3: Processing first storage report for DS-c30c45d5-e85a-4a83-a2b6-e959df2329a0 from datanode DatanodeRegistration(127.0.0.1:33197, datanodeUuid=d854862b-b95c-4a0d-bb20-9200cd77370d, infoPort=43079, infoSecurePort=0, ipcPort=33895, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006) 2024-11-16T05:50:54,549 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x197b15324f78d411 with lease ID 0xb24a9579fd14baf3: from storage DS-c30c45d5-e85a-4a83-a2b6-e959df2329a0 node DatanodeRegistration(127.0.0.1:33197, datanodeUuid=d854862b-b95c-4a0d-bb20-9200cd77370d, infoPort=43079, infoSecurePort=0, ipcPort=33895, storageInfo=lv=-57;cid=testClusterID;nsid=447812049;c=1731736254006), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T05:50:54,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T05:50:54,590 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930 2024-11-16T05:50:54,593 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/zookeeper_0, clientPort=55071, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T05:50:54,594 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55071 2024-11-16T05:50:54,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,595 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,602 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:50:54,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741825_1001 (size=7) 2024-11-16T05:50:54,603 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113 with version=8 2024-11-16T05:50:54,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42921/user/jenkins/test-data/0d00ce02-f9c4-26d6-e9e7-a3dcefe8d2a2/hbase-staging 2024-11-16T05:50:54,604 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:50:54,605 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42349 2024-11-16T05:50:54,606 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42349 connecting to ZooKeeper ensemble=127.0.0.1:55071 2024-11-16T05:50:54,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:423490x0, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:50:54,610 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42349-0x100471594ef0000 connected 2024-11-16T05:50:54,626 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,628 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42349-0x100471594ef0000, 
quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:50:54,629 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113, hbase.cluster.distributed=false 2024-11-16T05:50:54,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:50:54,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42349 2024-11-16T05:50:54,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42349 2024-11-16T05:50:54,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42349 2024-11-16T05:50:54,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42349 2024-11-16T05:50:54,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42349 2024-11-16T05:50:54,646 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3456ee6a3164:0 server-side Connection retries=45 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T05:50:54,646 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T05:50:54,647 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40505 2024-11-16T05:50:54,647 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40505 connecting to ZooKeeper ensemble=127.0.0.1:55071 2024-11-16T05:50:54,648 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,649 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405050x0, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T05:50:54,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40505-0x100471594ef0001 connected 2024-11-16T05:50:54,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:50:54,655 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T05:50:54,655 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T05:50:54,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T05:50:54,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T05:50:54,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40505 2024-11-16T05:50:54,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40505 2024-11-16T05:50:54,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40505 2024-11-16T05:50:54,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40505 2024-11-16T05:50:54,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40505 2024-11-16T05:50:54,670 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3456ee6a3164:42349 2024-11-16T05:50:54,671 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:50:54,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:50:54,674 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, 
quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T05:50:54,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,675 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T05:50:54,676 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3456ee6a3164,42349,1731736254604 from backup master directory 2024-11-16T05:50:54,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:50:54,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T05:50:54,677 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
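
The ZKUtil/ZKWatcher entries above show the standard ZooKeeper pattern behind "Set watcher on znode that does not yet exist": an exists() call registers a watch even when the node is absent, so the caller is later delivered a NodeCreated (or NodeDeleted/NodeChildrenChanged) event for that path, exactly as logged here for /hbase/running, /hbase/master and /hbase/backup-masters. A minimal sketch with the plain Apache ZooKeeper client rather than HBase's ZKUtil wrapper; the connect string and paths are taken from the log, everything else is illustrative only.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningZNodeWatch {
        public static void main(String[] args) throws Exception {
            // Quorum address as logged; the session timeout is an arbitrary illustrative value.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55071", 30_000, event -> { });

            Watcher watch = (WatchedEvent event) ->
                // Fires once /hbase/running is created, even though the node did not
                // exist at the time the watch was registered.
                System.out.println("Event " + event.getType() + " on " + event.getPath());

            // exists() returns null for an absent znode, but the watch is still set.
            Stat stat = zk.exists("/hbase/running", watch);
            System.out.println("/hbase/running currently " + (stat == null ? "absent" : "present"));
        }
    }
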
2024-11-16T05:50:54,677 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,680 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/hbase.id] with ID: 69c108e4-73e3-4af0-8b58-645a45892beb 2024-11-16T05:50:54,680 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/.tmp/hbase.id 2024-11-16T05:50:54,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:50:54,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741826_1002 (size=42) 2024-11-16T05:50:54,691 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/.tmp/hbase.id]:[hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/hbase.id] 2024-11-16T05:50:54,700 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:54,701 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T05:50:54,702 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
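
The two FSUtils entries just above write the cluster ID to a temporary file under .tmp and then move it to its final name, the usual write-then-rename trick for publishing a small file on HDFS without readers ever seeing a partial write. A rough equivalent using the public Hadoop FileSystem API; the paths and ID value are copied from the log, and the code is a sketch, not HBase's FSUtils implementation.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteClusterId {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:37551");
            FileSystem fs = FileSystem.get(conf);

            Path root = new Path("/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113");
            Path tmp = new Path(root, ".tmp/hbase.id");
            Path target = new Path(root, "hbase.id");

            // Write the ID to a temporary location first...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("69c108e4-73e3-4af0-8b58-645a45892beb".getBytes(StandardCharsets.UTF_8));
            }
            // ...then move it into place so hbase.id appears atomically.
            if (!fs.rename(tmp, target)) {
                throw new IOException("rename " + tmp + " -> " + target + " failed");
            }
        }
    }
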
2024-11-16T05:50:54,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:50:54,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741827_1003 (size=196) 2024-11-16T05:50:54,709 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T05:50:54,710 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T05:50:54,711 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:50:54,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:50:54,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741828_1004 (size=1189) 2024-11-16T05:50:54,718 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store 2024-11-16T05:50:54,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:50:54,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741829_1005 (size=34) 2024-11-16T05:50:54,724 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:50:54,724 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T05:50:54,724 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:54,724 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:54,724 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:50:54,724 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:54,724 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T05:50:54,724 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736254724Disabling compacts and flushes for region at 1731736254724Disabling writes for close at 1731736254724Writing region close event to WAL at 1731736254724Closed at 1731736254724 2024-11-16T05:50:54,725 WARN [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/.initializing 2024-11-16T05:50:54,725 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/WALs/3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,727 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C42349%2C1731736254604, suffix=, logDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/WALs/3456ee6a3164,42349,1731736254604, archiveDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/oldWALs, maxLogs=10 2024-11-16T05:50:54,727 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C42349%2C1731736254604.1731736254727 2024-11-16T05:50:54,731 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/WALs/3456ee6a3164,42349,1731736254604/3456ee6a3164%2C42349%2C1731736254604.1731736254727 2024-11-16T05:50:54,731 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33505:33505),(127.0.0.1/127.0.0.1:43079:43079)] 2024-11-16T05:50:54,732 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:50:54,732 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:50:54,732 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,732 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T05:50:54,735 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:54,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T05:50:54,736 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,737 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:50:54,737 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T05:50:54,738 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:50:54,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T05:50:54,739 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,740 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T05:50:54,740 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,740 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,741 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,742 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,742 DEBUG [master/3456ee6a3164:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,742 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T05:50:54,743 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T05:50:54,744 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:50:54,745 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883882, jitterRate=0.12391440570354462}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T05:50:54,745 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731736254732Initializing all the Stores at 1731736254733 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736254733Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736254733Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736254733Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736254733Cleaning up temporary data from old regions at 1731736254742 (+9 ms)Region opened successfully at 1731736254745 (+3 ms) 2024-11-16T05:50:54,746 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T05:50:54,748 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72790271, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:50:54,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T05:50:54,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T05:50:54,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T05:50:54,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T05:50:54,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T05:50:54,749 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T05:50:54,750 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T05:50:54,751 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T05:50:54,752 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T05:50:54,753 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T05:50:54,753 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T05:50:54,754 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T05:50:54,755 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T05:50:54,755 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T05:50:54,756 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T05:50:54,756 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T05:50:54,757 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T05:50:54,758 DEBUG 
[master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T05:50:54,760 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T05:50:54,763 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T05:50:54,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:50:54,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T05:50:54,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,765 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3456ee6a3164,42349,1731736254604, sessionid=0x100471594ef0000, setting cluster-up flag (Was=false) 2024-11-16T05:50:54,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,772 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T05:50:54,773 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:54,777 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T05:50:54,778 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3456ee6a3164,42349,1731736254604 2024-11-16T05:50:54,779 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T05:50:54,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T05:50:54,780 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T05:50:54,780 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T05:50:54,780 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3456ee6a3164,42349,1731736254604 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3456ee6a3164:0, corePoolSize=5, maxPoolSize=5 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3456ee6a3164:0, corePoolSize=10, maxPoolSize=10 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,781 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:50:54,782 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3456ee6a3164:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731736284782 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T05:50:54,782 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,783 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:50:54,783 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T05:50:54,783 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T05:50:54,784 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736254784,5,FailOnTimeoutGroup] 2024-11-16T05:50:54,784 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736254784,5,FailOnTimeoutGroup] 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,784 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,785 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,785 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T05:50:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:50:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741831_1007 (size=1321) 2024-11-16T05:50:54,793 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T05:50:54,793 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113 2024-11-16T05:50:54,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:50:54,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741832_1008 (size=32) 2024-11-16T05:50:54,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:50:54,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:50:54,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:50:54,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:54,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:50:54,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:50:54,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:54,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:50:54,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:50:54,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:54,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:50:54,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:50:54,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:54,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:54,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:50:54,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740 2024-11-16T05:50:54,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740 2024-11-16T05:50:54,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:50:54,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:50:54,808 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
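
The FlushLargeStoresPolicy values (32.0 M for master:store further up, 16.0 M for hbase:meta here) are simply the region's memstore flush heap size divided by its number of column families: master:store has four families and flushSize=134217728 (128 MB), giving 32 MB, while the 16 MB logged for hbase:meta's four families implies a 64 MB flush size for meta; that last figure is inferred from the logged result, not read from configuration. A trivial check of the arithmetic:

    public class FlushLowerBound {
        // Per-family lower bound = memstore flush (heap) size / number of column families.
        static long lowerBoundMb(long flushSizeMb, int families) {
            return flushSizeMb / families;
        }

        public static void main(String[] args) {
            System.out.println(lowerBoundMb(128, 4)); // master:store (info, proc, rs, state) -> 32
            System.out.println(lowerBoundMb(64, 4));  // hbase:meta (info, ns, rep_barrier, table) -> 16
        }
    }
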
2024-11-16T05:50:54,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:50:54,810 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T05:50:54,810 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733677, jitterRate=-0.06708258390426636}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:50:54,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731736254799Initializing all the Stores at 1731736254800 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736254800Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736254800Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736254800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736254800Cleaning up temporary data from old regions at 1731736254807 (+7 ms)Region opened successfully at 1731736254811 (+4 ms) 2024-11-16T05:50:54,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:50:54,811 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:50:54,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:50:54,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:50:54,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:50:54,811 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:50:54,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736254811Disabling compacts and flushes for region at 1731736254811Disabling writes for close at 1731736254811Writing region close 
event to WAL at 1731736254811Closed at 1731736254811 2024-11-16T05:50:54,812 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:50:54,813 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T05:50:54,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T05:50:54,814 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:50:54,815 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T05:50:54,860 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(746): ClusterId : 69c108e4-73e3-4af0-8b58-645a45892beb 2024-11-16T05:50:54,860 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T05:50:54,862 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T05:50:54,862 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T05:50:54,863 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T05:50:54,864 DEBUG [RS:0;3456ee6a3164:40505 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a21eb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3456ee6a3164/172.17.0.2:0 2024-11-16T05:50:54,877 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3456ee6a3164:40505 2024-11-16T05:50:54,877 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T05:50:54,877 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T05:50:54,877 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(832): About to register with Master. 
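
The region open and close journals above timestamp each step with a raw epoch-millisecond value fused onto the preceding word (for example "Region opened successfully at 1731736254811 (+4 ms)") plus a delta from the previous step. Those values line up with the human-readable timestamps on the surrounding entries: 1731736254811 ms is 2024-11-16T05:50:54.811Z. A one-liner with java.time for decoding them while reading the journal:

    import java.time.Instant;

    public class JournalMillis {
        public static void main(String[] args) {
            long t = 1731736254811L; // from the hbase:meta open journal above
            System.out.println(Instant.ofEpochMilli(t)); // 2024-11-16T05:50:54.811Z
        }
    }
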
2024-11-16T05:50:54,878 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(2659): reportForDuty to master=3456ee6a3164,42349,1731736254604 with port=40505, startcode=1731736254645 2024-11-16T05:50:54,878 DEBUG [RS:0;3456ee6a3164:40505 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T05:50:54,880 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51395, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T05:50:54,881 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42349 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3456ee6a3164,40505,1731736254645 2024-11-16T05:50:54,881 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42349 {}] master.ServerManager(517): Registering regionserver=3456ee6a3164,40505,1731736254645 2024-11-16T05:50:54,882 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113 2024-11-16T05:50:54,882 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37551 2024-11-16T05:50:54,882 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T05:50:54,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:50:54,884 DEBUG [RS:0;3456ee6a3164:40505 {}] zookeeper.ZKUtil(111): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3456ee6a3164,40505,1731736254645 2024-11-16T05:50:54,884 WARN [RS:0;3456ee6a3164:40505 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T05:50:54,884 INFO [RS:0;3456ee6a3164:40505 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:50:54,884 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3456ee6a3164,40505,1731736254645] 2024-11-16T05:50:54,885 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/3456ee6a3164,40505,1731736254645 2024-11-16T05:50:54,888 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T05:50:54,889 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T05:50:54,890 INFO [RS:0;3456ee6a3164:40505 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T05:50:54,890 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
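The memstore limit (globalMemStoreLimit=880 M) and the compaction throughput bounds (100/50 MB/s) reported just above are derived from standard region-server settings. A sketch of the configuration keys that usually drive them; the key names are the commonly documented ones and should be checked against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the heap the region server may use for all memstores
        // (the 880 MB above is this fraction applied to the test JVM's heap).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Bounds consumed by PressureAwareCompactionThroughputController, in bytes/second.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }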
2024-11-16T05:50:54,890 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T05:50:54,891 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T05:50:54,891 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3456ee6a3164:0, corePoolSize=2, maxPoolSize=2 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3456ee6a3164:0, corePoolSize=1, maxPoolSize=1 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:50:54,891 DEBUG [RS:0;3456ee6a3164:40505 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3456ee6a3164:0, corePoolSize=3, maxPoolSize=3 2024-11-16T05:50:54,892 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
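The "Chore ScheduledChore name=... is enabled." lines here and in the surrounding entries are emitted when a periodic task is handed to the server's ChoreService. A minimal sketch of that public API; the chore name and period are made up for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // A 1-second periodic task, analogous to CompactionChecker/MemstoreFlusherChore above.
        ScheduledChore demo = new ScheduledChore("demoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(demo);   // ChoreService logs the "... is enabled." line at this point
        Thread.sleep(3000);
        service.shutdown();
      }
    }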
2024-11-16T05:50:54,892 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,892 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,892 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,892 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,892 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40505,1731736254645-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:50:54,905 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T05:50:54,905 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,40505,1731736254645-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,906 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,906 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.Replication(171): 3456ee6a3164,40505,1731736254645 started 2024-11-16T05:50:54,918 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:54,918 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1482): Serving as 3456ee6a3164,40505,1731736254645, RpcServer on 3456ee6a3164/172.17.0.2:40505, sessionid=0x100471594ef0001 2024-11-16T05:50:54,919 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T05:50:54,919 DEBUG [RS:0;3456ee6a3164:40505 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3456ee6a3164,40505,1731736254645 2024-11-16T05:50:54,919 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,40505,1731736254645' 2024-11-16T05:50:54,919 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T05:50:54,919 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T05:50:54,920 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T05:50:54,920 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T05:50:54,920 DEBUG [RS:0;3456ee6a3164:40505 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3456ee6a3164,40505,1731736254645 2024-11-16T05:50:54,920 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3456ee6a3164,40505,1731736254645' 2024-11-16T05:50:54,920 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T05:50:54,920 DEBUG 
[RS:0;3456ee6a3164:40505 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T05:50:54,920 DEBUG [RS:0;3456ee6a3164:40505 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T05:50:54,921 INFO [RS:0;3456ee6a3164:40505 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T05:50:54,921 INFO [RS:0;3456ee6a3164:40505 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T05:50:54,965 WARN [3456ee6a3164:42349 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-16T05:50:55,022 INFO [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C40505%2C1731736254645, suffix=, logDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/3456ee6a3164,40505,1731736254645, archiveDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs, maxLogs=32 2024-11-16T05:50:55,023 INFO [RS:0;3456ee6a3164:40505 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C40505%2C1731736254645.1731736255023 2024-11-16T05:50:55,027 INFO [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/3456ee6a3164,40505,1731736254645/3456ee6a3164%2C40505%2C1731736254645.1731736255023 2024-11-16T05:50:55,028 DEBUG [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43079:43079),(127.0.0.1/127.0.0.1:33505:33505)] 2024-11-16T05:50:55,215 DEBUG [3456ee6a3164:42349 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T05:50:55,216 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3456ee6a3164,40505,1731736254645 2024-11-16T05:50:55,217 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,40505,1731736254645, state=OPENING 2024-11-16T05:50:55,218 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T05:50:55,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:55,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:55,221 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T05:50:55,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:50:55,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:50:55,221 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,40505,1731736254645}] 2024-11-16T05:50:55,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,41701,1731736064423/3456ee6a3164%2C41701%2C1731736064423.1731736064627 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:55,374 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T05:50:55,375 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34455, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T05:50:55,379 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T05:50:55,379 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:50:55,380 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3456ee6a3164%2C40505%2C1731736254645.meta, suffix=.meta, logDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/3456ee6a3164,40505,1731736254645, archiveDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs, maxLogs=32 2024-11-16T05:50:55,380 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3456ee6a3164%2C40505%2C1731736254645.meta.1731736255380.meta 2024-11-16T05:50:55,389 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/3456ee6a3164,40505,1731736254645/3456ee6a3164%2C40505%2C1731736254645.meta.1731736255380.meta 2024-11-16T05:50:55,392 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33505:33505),(127.0.0.1/127.0.0.1:43079:43079)] 2024-11-16T05:50:55,394 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T05:50:55,394 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T05:50:55,394 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T05:50:55,394 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
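The meta WAL created above is the classic FSHLog provider, rolling at half the block size (rollsize=128 MB of blocksize=256 MB) and capped at maxLogs=32. A sketch of the settings that typically select and size this provider; the key names are assumed from common usage and should be verified against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects FSHLogProvider; "asyncfs" would select the async provider instead.
        conf.set("hbase.wal.provider", "filesystem");
        // WAL block size, and the roll threshold as a fraction of it (0.5 * 256 MB = 128 MB).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on un-archived WAL files per provider, matching "maxLogs=32" above.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }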
2024-11-16T05:50:55,395 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T05:50:55,395 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T05:50:55,395 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T05:50:55,395 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T05:50:55,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T05:50:55,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T05:50:55,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:55,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:55,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T05:50:55,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T05:50:55,399 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:55,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:55,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T05:50:55,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T05:50:55,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:55,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T05:50:55,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T05:50:55,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T05:50:55,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T05:50:55,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
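The four meta stores above are built from the column-family descriptors dumped in the open journal (VERSIONS, BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY, BLOCKSIZE, and so on). For a user table the equivalent family is declared through the public builder API; a sketch mirroring the 'info' family's settings, with an illustrative table name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorSketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(info)
            .build();
        System.out.println(table);
      }
    }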
2024-11-16T05:50:55,402 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T05:50:55,403 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740 2024-11-16T05:50:55,404 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740 2024-11-16T05:50:55,405 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T05:50:55,405 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T05:50:55,405 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T05:50:55,406 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T05:50:55,407 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727639, jitterRate=-0.07475949823856354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T05:50:55,407 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T05:50:55,408 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731736255395Writing region info on filesystem at 1731736255395Initializing all the Stores at 1731736255396 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736255396Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736255396Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731736255396Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731736255396Cleaning up temporary data from old regions at 1731736255405 (+9 ms)Running coprocessor post-open hooks at 1731736255407 (+2 ms)Region opened successfully at 1731736255408 (+1 ms) 2024-11-16T05:50:55,408 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731736255373 2024-11-16T05:50:55,410 DEBUG [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T05:50:55,411 INFO [RS_OPEN_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T05:50:55,411 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3456ee6a3164,40505,1731736254645 2024-11-16T05:50:55,412 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3456ee6a3164,40505,1731736254645, state=OPEN 2024-11-16T05:50:55,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:50:55,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T05:50:55,417 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3456ee6a3164,40505,1731736254645 2024-11-16T05:50:55,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:50:55,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T05:50:55,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T05:50:55,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3456ee6a3164,40505,1731736254645 in 196 msec 2024-11-16T05:50:55,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T05:50:55,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-16T05:50:55,423 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T05:50:55,423 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T05:50:55,424 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:50:55,425 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,40505,1731736254645, seqNum=-1] 2024-11-16T05:50:55,425 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:50:55,426 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47085, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:50:55,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-11-16T05:50:55,431 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731736255431, completionTime=-1 2024-11-16T05:50:55,432 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T05:50:55,432 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-16T05:50:55,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-16T05:50:55,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731736315434 2024-11-16T05:50:55,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731736375434 2024-11-16T05:50:55,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-16T05:50:55,434 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,42349,1731736254604-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:55,435 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,42349,1731736254604-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:55,435 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,42349,1731736254604-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:55,435 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3456ee6a3164:42349, period=300000, unit=MILLISECONDS is enabled. 
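InitMetaProcedure finishes by creating the 'default' and 'hbase' namespaces; once the master reports initialization complete, a client can confirm they exist. A minimal sketch, assuming connection settings come from whatever hbase-site.xml is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Expect at least "default" and "hbase" after InitMetaProcedure completes.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }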
2024-11-16T05:50:55,435 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:55,435 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T05:50:55,437 DEBUG [master/3456ee6a3164:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.763sec 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,42349,1731736254604-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T05:50:55,440 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,42349,1731736254604-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T05:50:55,443 DEBUG [master/3456ee6a3164:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T05:50:55,443 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T05:50:55,443 INFO [master/3456ee6a3164:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3456ee6a3164,42349,1731736254604-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
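The meta region location fetched above ([region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,40505,...]) is fetched again by the test client in the entries that follow. The same location is visible through the public RegionLocator API; a sketch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Prints something like: region=hbase:meta,,1.1588230740, hostname=..., seqNum=...
            System.out.println(loc);
          }
        }
      }
    }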
2024-11-16T05:50:55,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@199ad4f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:50:55,461 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3456ee6a3164,42349,-1 for getting cluster id 2024-11-16T05:50:55,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T05:50:55,463 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '69c108e4-73e3-4af0-8b58-645a45892beb' 2024-11-16T05:50:55,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T05:50:55,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "69c108e4-73e3-4af0-8b58-645a45892beb" 2024-11-16T05:50:55,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a3e655, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:50:55,464 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3456ee6a3164,42349,-1] 2024-11-16T05:50:55,464 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T05:50:55,464 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:55,466 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43016, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T05:50:55,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d8fb170, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T05:50:55,467 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T05:50:55,468 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3456ee6a3164,40505,1731736254645, seqNum=-1] 2024-11-16T05:50:55,469 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T05:50:55,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T05:50:55,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3456ee6a3164,42349,1731736254604 2024-11-16T05:50:55,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T05:50:55,475 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T05:50:55,476 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T05:50:55,478 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/test.com,8080,1, archiveDir=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs, maxLogs=32 2024-11-16T05:50:55,479 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731736255478 2024-11-16T05:50:55,489 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/test.com,8080,1/test.com%2C8080%2C1.1731736255478 2024-11-16T05:50:55,497 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33505:33505),(127.0.0.1/127.0.0.1:43079:43079)] 2024-11-16T05:50:55,498 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731736255498 2024-11-16T05:50:55,514 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,514 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,514 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,514 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,514 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,514 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/test.com,8080,1/test.com%2C8080%2C1.1731736255478 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/test.com,8080,1/test.com%2C8080%2C1.1731736255498 2024-11-16T05:50:55,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741835_1011 (size=93) 2024-11-16T05:50:55,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741835_1011 (size=93) 2024-11-16T05:50:55,522 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33505:33505),(127.0.0.1/127.0.0.1:43079:43079)] 2024-11-16T05:50:55,523 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/WALs/test.com,8080,1/test.com%2C8080%2C1.1731736255478 to hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs/test.com%2C8080%2C1.1731736255478 2024-11-16T05:50:55,524 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,524 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,524 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,524 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,524 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33197 is added to blk_1073741836_1012 (size=93) 2024-11-16T05:50:55,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741836_1012 (size=93) 2024-11-16T05:50:55,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs 2024-11-16T05:50:55,528 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731736255498) 2024-11-16T05:50:55,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T05:50:55,528 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T05:50:55,528 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:50:55,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:55,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:55,528 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T05:50:55,528 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=774222860, stopped=false 2024-11-16T05:50:55,528 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T05:50:55,528 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3456ee6a3164,42349,1731736254604 2024-11-16T05:50:55,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:50:55,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:55,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T05:50:55,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:55,530 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:50:55,530 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
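The call stack above is the test's teardown path: AbstractTestLogRolling.tearDown() into HBaseTestingUtil.shutdownMiniCluster(). In outline, the harness that produces a log like this one looks roughly as follows; this is a sketch assembled from the class and method names visible in the trace, not the actual test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LogRollingHarnessSketch {
      private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        TEST_UTIL.startMiniCluster();    // emits the "Minicluster is up" line seen earlier
      }

      @Test
      public void testSomethingAgainstTheCluster() throws Exception {
        // test body elided
      }

      @After
      public void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster(); // emits "Shutting down minicluster" and the close stacks above
      }
    }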
2024-11-16T05:50:55,530 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:50:55,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:55,530 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '3456ee6a3164,40505,1731736254645' ***** 2024-11-16T05:50:55,530 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(959): stopping server 3456ee6a3164,40505,1731736254645 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3456ee6a3164:40505. 2024-11-16T05:50:55,531 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:50:55,531 DEBUG [RS:0;3456ee6a3164:40505 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T05:50:55,531 DEBUG [RS:0;3456ee6a3164:40505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
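Closing 1588230740 in the entries that follow flushes its remaining memstore (4/4 column families, ~74 B) before the region goes offline. Outside of a close, the same flush can be requested explicitly through the Admin API; a sketch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Force a memstore flush of hbase:meta; the region server then writes store files
          // under .../data/hbase/meta/1588230740/<family>/ as in the entries below.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }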
2024-11-16T05:50:55,531 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T05:50:55,532 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T05:50:55,532 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T05:50:55,532 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T05:50:55,532 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T05:50:55,532 DEBUG [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T05:50:55,533 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T05:50:55,533 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T05:50:55,533 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T05:50:55,533 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T05:50:55,533 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T05:50:55,533 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T05:50:55,554 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/.tmp/ns/adffd5e1fef545c68bcb6adc529352ff is 43, key is default/ns:d/1731736255427/Put/seqid=0 2024-11-16T05:50:55,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741837_1013 (size=5153) 2024-11-16T05:50:55,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741837_1013 (size=5153) 2024-11-16T05:50:55,558 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/.tmp/ns/adffd5e1fef545c68bcb6adc529352ff 2024-11-16T05:50:55,564 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/.tmp/ns/adffd5e1fef545c68bcb6adc529352ff as hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/ns/adffd5e1fef545c68bcb6adc529352ff 2024-11-16T05:50:55,571 INFO 
[RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/ns/adffd5e1fef545c68bcb6adc529352ff, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T05:50:55,573 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-11-16T05:50:55,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36821/user/jenkins/test-data/30d9e81e-520a-e278-f1ff-6fe0138f5bc7/WALs/3456ee6a3164,46863,1731736063466/3456ee6a3164%2C46863%2C1731736063466.meta.1731736064298.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T05:50:55,579 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T05:50:55,580 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T05:50:55,580 INFO [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T05:50:55,580 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731736255533Running coprocessor pre-close hooks at 1731736255533Disabling compacts and flushes for region at 1731736255533Disabling writes for close at 1731736255533Obtaining lock to block concurrent updates at 1731736255533Preparing flush snapshotting stores in 1588230740 at 1731736255533Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731736255533Flushing stores of hbase:meta,,1.1588230740 at 1731736255534 (+1 ms)Flushing 1588230740/ns: creating writer at 1731736255534Flushing 1588230740/ns: appending metadata at 1731736255553 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731736255553Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24e5023c: reopening flushed file at 1731736255563 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1731736255573 (+10 ms)Writing region close event to WAL at 1731736255573Running coprocessor post-close hooks at 1731736255580 (+7 ms)Closed at 1731736255580 2024-11-16T05:50:55,580 DEBUG [RS_CLOSE_META-regionserver/3456ee6a3164:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T05:50:55,732 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(976): stopping server 3456ee6a3164,40505,1731736254645; all regions closed. 
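Note on the WARN above: the InvocationTargetException wraps "Filesystem closed" because RecoverLeaseFSUtils probes DFSClient.isFileClosed reflectively while closing an old WAL writer, and by that point the backing DFSClient has already been shut down. A hedged sketch of the lease-recovery call appearing in that stack trace follows; only the class and method names come from the log, the exact overload and the null progress reporter are assumptions.

// Hedged sketch: the WAL lease-recovery call seen in the stack trace above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

public class WalLeaseRecoverySketch {
  static void recover(Configuration conf, Path walPath) throws java.io.IOException {
    FileSystem fs = walPath.getFileSystem(conf);
    // Retries until HDFS releases the lease on the WAL file (or a timeout is hit).
    // In the log above this fails with "Filesystem closed" because `fs` was already
    // closed during cluster shutdown. Passing null as the reporter is an assumption.
    RecoverLeaseFSUtils.recoverFileLease(fs, walPath, conf, null);
  }
}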
2024-11-16T05:50:55,733 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,733 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,733 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,733 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,733 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741834_1010 (size=1152) 2024-11-16T05:50:55,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741834_1010 (size=1152) 2024-11-16T05:50:55,737 DEBUG [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs 2024-11-16T05:50:55,737 INFO [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C40505%2C1731736254645.meta:.meta(num 1731736255380) 2024-11-16T05:50:55,738 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,738 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741833_1009 (size=93) 2024-11-16T05:50:55,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741833_1009 (size=93) 2024-11-16T05:50:55,743 DEBUG [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs 2024-11-16T05:50:55,743 INFO [RS:0;3456ee6a3164:40505 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3456ee6a3164%2C40505%2C1731736254645:(num 1731736255023) 2024-11-16T05:50:55,743 DEBUG [RS:0;3456ee6a3164:40505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T05:50:55,743 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T05:50:55,743 INFO [RS:0;3456ee6a3164:40505 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:50:55,743 INFO [RS:0;3456ee6a3164:40505 {}] hbase.ChoreService(370): Chore service for: regionserver/3456ee6a3164:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T05:50:55,743 INFO [RS:0;3456ee6a3164:40505 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:50:55,743 INFO [regionserver/3456ee6a3164:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
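Note: the "Moved 1 WAL file(s) to .../oldWALs" entries above record WAL archival on close. A hedged, illustration-only sketch of listing that archive directory with the standard Hadoop FileSystem API follows; the path layout is copied from the log's test-data directories and is otherwise an assumption.

// Hedged sketch: listing the archived WALs referred to by the entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OldWalsListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS would point at the mini DFS cluster, e.g. hdfs://localhost:37551 above.
    FileSystem fs = FileSystem.get(conf);
    Path oldWals =
        new Path("/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/oldWALs");
    for (FileStatus stat : fs.listStatus(oldWals)) {
      System.out.println(stat.getPath() + " (" + stat.getLen() + " bytes)");
    }
  }
}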
2024-11-16T05:50:55,743 INFO [RS:0;3456ee6a3164:40505 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40505 2024-11-16T05:50:55,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3456ee6a3164,40505,1731736254645 2024-11-16T05:50:55,746 INFO [RS:0;3456ee6a3164:40505 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:50:55,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T05:50:55,748 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3456ee6a3164,40505,1731736254645] 2024-11-16T05:50:55,750 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3456ee6a3164,40505,1731736254645 already deleted, retry=false 2024-11-16T05:50:55,751 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3456ee6a3164,40505,1731736254645 expired; onlineServers=0 2024-11-16T05:50:55,751 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3456ee6a3164,42349,1731736254604' ***** 2024-11-16T05:50:55,751 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T05:50:55,751 INFO [M:0;3456ee6a3164:42349 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T05:50:55,751 INFO [M:0;3456ee6a3164:42349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T05:50:55,751 DEBUG [M:0;3456ee6a3164:42349 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T05:50:55,751 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T05:50:55,751 DEBUG [M:0;3456ee6a3164:42349 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T05:50:55,751 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736254784 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.large.0-1731736254784,5,FailOnTimeoutGroup] 2024-11-16T05:50:55,751 DEBUG [master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736254784 {}] cleaner.HFileCleaner(306): Exit Thread[master/3456ee6a3164:0:becomeActiveMaster-HFileCleaner.small.0-1731736254784,5,FailOnTimeoutGroup] 2024-11-16T05:50:55,751 INFO [M:0;3456ee6a3164:42349 {}] hbase.ChoreService(370): Chore service for: master/3456ee6a3164:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T05:50:55,751 INFO [M:0;3456ee6a3164:42349 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T05:50:55,751 DEBUG [M:0;3456ee6a3164:42349 {}] master.HMaster(1795): Stopping service threads 2024-11-16T05:50:55,751 INFO [M:0;3456ee6a3164:42349 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T05:50:55,751 INFO [M:0;3456ee6a3164:42349 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T05:50:55,752 INFO [M:0;3456ee6a3164:42349 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T05:50:55,752 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T05:50:55,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T05:50:55,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T05:50:55,756 DEBUG [M:0;3456ee6a3164:42349 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-16T05:50:55,756 DEBUG [M:0;3456ee6a3164:42349 {}] master.ActiveMasterManager(353): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-16T05:50:55,757 INFO [M:0;3456ee6a3164:42349 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/.lastflushedseqids 2024-11-16T05:50:55,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741838_1014 (size=99) 2024-11-16T05:50:55,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741838_1014 (size=99) 2024-11-16T05:50:55,765 INFO [M:0;3456ee6a3164:42349 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T05:50:55,766 INFO [M:0;3456ee6a3164:42349 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T05:50:55,766 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-16T05:50:55,766 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:55,766 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:55,766 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T05:50:55,766 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:55,766 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T05:50:55,782 DEBUG [M:0;3456ee6a3164:42349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/54d0596f81c44311a344328c06262b95 is 82, key is hbase:meta,,1/info:regioninfo/1731736255411/Put/seqid=0 2024-11-16T05:50:55,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741839_1015 (size=5672) 2024-11-16T05:50:55,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741839_1015 (size=5672) 2024-11-16T05:50:55,788 INFO [M:0;3456ee6a3164:42349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/54d0596f81c44311a344328c06262b95 2024-11-16T05:50:55,807 DEBUG [M:0;3456ee6a3164:42349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a216c5b9ba6b4452b186a38c4ccb6053 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731736255431/Put/seqid=0 2024-11-16T05:50:55,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741840_1016 (size=5275) 2024-11-16T05:50:55,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741840_1016 (size=5275) 2024-11-16T05:50:55,813 INFO [M:0;3456ee6a3164:42349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a216c5b9ba6b4452b186a38c4ccb6053 2024-11-16T05:50:55,831 DEBUG [M:0;3456ee6a3164:42349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e608e6db0d874107abcf56103d9df5aa is 69, key is 3456ee6a3164,40505,1731736254645/rs:state/1731736254881/Put/seqid=0 2024-11-16T05:50:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to 
blk_1073741841_1017 (size=5156) 2024-11-16T05:50:55,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741841_1017 (size=5156) 2024-11-16T05:50:55,839 INFO [M:0;3456ee6a3164:42349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e608e6db0d874107abcf56103d9df5aa 2024-11-16T05:50:55,848 INFO [RS:0;3456ee6a3164:40505 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:50:55,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:55,848 INFO [RS:0;3456ee6a3164:40505 {}] regionserver.HRegionServer(1031): Exiting; stopping=3456ee6a3164,40505,1731736254645; zookeeper connection closed. 2024-11-16T05:50:55,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40505-0x100471594ef0001, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:55,849 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7746df3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7746df3 2024-11-16T05:50:55,849 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T05:50:55,863 DEBUG [M:0;3456ee6a3164:42349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b3bdca8329a14a53a3622c665f0a5fb6 is 52, key is load_balancer_on/state:d/1731736255474/Put/seqid=0 2024-11-16T05:50:55,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741842_1018 (size=5056) 2024-11-16T05:50:55,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741842_1018 (size=5056) 2024-11-16T05:50:55,868 INFO [M:0;3456ee6a3164:42349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b3bdca8329a14a53a3622c665f0a5fb6 2024-11-16T05:50:55,873 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/54d0596f81c44311a344328c06262b95 as hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/54d0596f81c44311a344328c06262b95 2024-11-16T05:50:55,877 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/54d0596f81c44311a344328c06262b95, entries=8, sequenceid=29, filesize=5.5 K 
2024-11-16T05:50:55,878 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a216c5b9ba6b4452b186a38c4ccb6053 as hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a216c5b9ba6b4452b186a38c4ccb6053 2024-11-16T05:50:55,883 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a216c5b9ba6b4452b186a38c4ccb6053, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T05:50:55,884 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e608e6db0d874107abcf56103d9df5aa as hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e608e6db0d874107abcf56103d9df5aa 2024-11-16T05:50:55,889 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e608e6db0d874107abcf56103d9df5aa, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T05:50:55,890 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b3bdca8329a14a53a3622c665f0a5fb6 as hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b3bdca8329a14a53a3622c665f0a5fb6 2024-11-16T05:50:55,895 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37551/user/jenkins/test-data/20b5f32a-a586-3c07-fd37-702dc0eac113/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b3bdca8329a14a53a3622c665f0a5fb6, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T05:50:55,897 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=29, compaction requested=false 2024-11-16T05:50:55,915 INFO [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T05:50:55,915 DEBUG [M:0;3456ee6a3164:42349 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731736255766Disabling compacts and flushes for region at 1731736255766Disabling writes for close at 1731736255766Obtaining lock to block concurrent updates at 1731736255766Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731736255766Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731736255767 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731736255767Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731736255767Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731736255782 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731736255782Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731736255792 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731736255807 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731736255807Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731736255817 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731736255830 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731736255830Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731736255844 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731736255863 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731736255863Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a902389: reopening flushed file at 1731736255872 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c8f174c: reopening flushed file at 1731736255878 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7deba6aa: reopening flushed file at 1731736255883 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7eb62f9: reopening flushed file at 1731736255889 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=29, compaction requested=false at 1731736255897 (+8 ms)Writing region close event to WAL at 1731736255915 (+18 ms)Closed at 1731736255915 2024-11-16T05:50:55,916 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,916 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,916 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,916 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,916 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T05:50:55,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33197 is added to blk_1073741830_1006 (size=10311) 2024-11-16T05:50:55,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37189 is added to blk_1073741830_1006 (size=10311) 2024-11-16T05:50:55,923 INFO [M:0;3456ee6a3164:42349 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T05:50:55,923 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T05:50:55,923 INFO [M:0;3456ee6a3164:42349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42349 2024-11-16T05:50:55,923 INFO [M:0;3456ee6a3164:42349 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T05:50:56,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:56,026 INFO [M:0;3456ee6a3164:42349 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T05:50:56,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42349-0x100471594ef0000, quorum=127.0.0.1:55071, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T05:50:56,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37ed99c5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:50:56,029 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68dc7da0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:50:56,029 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:50:56,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@660a9944{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:50:56,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39601996{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir/,STOPPED} 2024-11-16T05:50:56,031 WARN [BP-2095766484-172.17.0.2-1731736254006 heartbeating to localhost/127.0.0.1:37551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:50:56,031 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:50:56,031 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:50:56,031 WARN [BP-2095766484-172.17.0.2-1731736254006 heartbeating to localhost/127.0.0.1:37551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2095766484-172.17.0.2-1731736254006 (Datanode Uuid d854862b-b95c-4a0d-bb20-9200cd77370d) service to localhost/127.0.0.1:37551 2024-11-16T05:50:56,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data3/current/BP-2095766484-172.17.0.2-1731736254006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:56,032 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data4/current/BP-2095766484-172.17.0.2-1731736254006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:56,032 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:50:56,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54fc4cc1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T05:50:56,034 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25e8389f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:50:56,035 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:50:56,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f6a71ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:50:56,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cad7b36{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir/,STOPPED} 2024-11-16T05:50:56,036 WARN [BP-2095766484-172.17.0.2-1731736254006 heartbeating to localhost/127.0.0.1:37551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T05:50:56,036 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T05:50:56,036 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T05:50:56,036 WARN [BP-2095766484-172.17.0.2-1731736254006 heartbeating to localhost/127.0.0.1:37551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2095766484-172.17.0.2-1731736254006 (Datanode Uuid 32366372-f206-4ecf-bd50-622429ec3bb5) service to localhost/127.0.0.1:37551 2024-11-16T05:50:56,037 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data1/current/BP-2095766484-172.17.0.2-1731736254006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:56,037 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/cluster_45e65205-c932-f817-7b85-9d115d338e0a/data/data2/current/BP-2095766484-172.17.0.2-1731736254006 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T05:50:56,037 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T05:50:56,044 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a619478{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T05:50:56,045 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f1f87c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T05:50:56,045 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T05:50:56,045 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58c8824c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T05:50:56,045 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52e40e10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8ce7c27a-5627-50f1-0566-e63163f35930/hadoop.log.dir/,STOPPED} 2024-11-16T05:50:56,054 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T05:50:56,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T05:50:56,084 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 231) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:37551 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:37551 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37551 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37551 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37551 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1436593709) connection to localhost/127.0.0.1:37551 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC 
Client (1436593709) connection to localhost/127.0.0.1:37551 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37551 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=166 (was 166), ProcessCount=11 (was 11), AvailableMemoryMB=3985 (was 2880) - AvailableMemoryMB LEAK? -
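Note: the ResourceChecker summary above flags potentially hanging threads (netty event loops, IPC client threads, lease renewers) and file-descriptor growth after testLogRollOnNothingWritten. A hedged diagnostic sketch follows, using only standard JDK APIs, showing one way to produce a comparable live-thread listing when investigating such a report; it is not part of the test framework.

// Hedged sketch: dump live threads and their stacks, similar in spirit to the
// "Potentially hanging thread" listing above.
import java.util.Map;

public class ThreadLeakDumpSketch {
  public static void dumpLiveThreads() {
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
      Thread t = e.getKey();
      // Daemon event-loop and IPC threads are the usual entries in reports like the one above.
      System.out.println("Potentially hanging thread: " + t.getName()
          + " (daemon=" + t.isDaemon() + ", state=" + t.getState() + ")");
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    at " + frame);
      }
    }
  }
}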